Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 2
-rw-r--r-- drivers/acpi/apei/ghes.c | 36
-rw-r--r-- drivers/acpi/pci_link.c | 10
-rw-r--r-- drivers/acpi/sleep.c | 12
-rw-r--r-- drivers/amba/tegra-ahb.c | 1
-rw-r--r-- drivers/android/binder/process.rs | 64
-rw-r--r-- drivers/ata/libata-acpi.c | 67
-rw-r--r-- drivers/ata/libata-core.c | 13
-rw-r--r-- drivers/ata/libata-scsi.c | 1
-rw-r--r-- drivers/ata/libata.h | 4
-rw-r--r-- drivers/base/arch_topology.c | 96
-rw-r--r-- drivers/base/base.h | 16
-rw-r--r-- drivers/base/bus.c | 41
-rw-r--r-- drivers/base/core.c | 2
-rw-r--r-- drivers/base/cpu.c | 26
-rw-r--r-- drivers/base/dd.c | 12
-rw-r--r-- drivers/base/devres.c | 25
-rw-r--r-- drivers/base/firmware_loader/main.c | 12
-rw-r--r-- drivers/base/firmware_loader/sysfs.c | 10
-rw-r--r-- drivers/base/firmware_loader/sysfs_upload.c | 6
-rw-r--r-- drivers/base/syscore.c | 82
-rw-r--r-- drivers/bus/mvebu-mbus.c | 16
-rw-r--r-- drivers/bus/stm32_rifsc.c | 597
-rw-r--r-- drivers/bus/sunxi-rsb.c | 2
-rw-r--r-- drivers/bus/ti-sysc.c | 11
-rw-r--r-- drivers/cache/Kconfig | 37
-rw-r--r-- drivers/cache/Makefile | 2
-rw-r--r-- drivers/cache/hisi_soc_hha.c | 194
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 24
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 42
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 199
-rw-r--r-- drivers/clk/at91/clk-peripheral.c | 1
-rw-r--r-- drivers/clk/at91/pmc.c | 12
-rw-r--r-- drivers/clk/at91/pmc.h | 3
-rw-r--r-- drivers/clk/davinci/psc-da850.c | 7
-rw-r--r-- drivers/clk/imx/clk-vf610.c | 12
-rw-r--r-- drivers/clk/ingenic/jz4725b-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4740-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4755-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4760-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4770-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4780-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/pm.c | 14
-rw-r--r-- drivers/clk/ingenic/pm.h | 2
-rw-r--r-- drivers/clk/ingenic/tcu.c | 12
-rw-r--r-- drivers/clk/ingenic/x1000-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/x1830-cgu.c | 2
-rw-r--r-- drivers/clk/mvebu/common.c | 12
-rw-r--r-- drivers/clk/mvebu/cp110-system-controller.c | 20
-rw-r--r-- drivers/clk/renesas/clk-div6.c | 6
-rw-r--r-- drivers/clk/renesas/rcar-gen3-cpg.c | 15
-rw-r--r-- drivers/clk/renesas/rcar-gen4-cpg.c | 9
-rw-r--r-- drivers/clk/rockchip/clk-rk3288.c | 12
-rw-r--r-- drivers/clk/samsung/clk-s5pv210-audss.c | 12
-rw-r--r-- drivers/clk/samsung/clk.c | 12
-rw-r--r-- drivers/clk/tegra/clk-tegra210.c | 12
-rw-r--r-- drivers/clocksource/timer-armada-370-xp.c | 12
-rw-r--r-- drivers/cpufreq/rcpufreq_dt.rs | 4
-rw-r--r-- drivers/cpuidle/cpuidle-psci.c | 12
-rw-r--r-- drivers/crypto/ccp/Kconfig | 1
-rw-r--r-- drivers/crypto/ccp/Makefile | 4
-rw-r--r-- drivers/crypto/ccp/sev-dev-tio.c | 864
-rw-r--r-- drivers/crypto/ccp/sev-dev-tio.h | 123
-rw-r--r-- drivers/crypto/ccp/sev-dev-tsm.c | 405
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 103
-rw-r--r-- drivers/crypto/ccp/sev-dev.h | 11
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c | 8
-rw-r--r-- drivers/cxl/core/region.c | 5
-rw-r--r-- drivers/edac/ie31200_edac.c | 4
-rw-r--r-- drivers/firmware/imx/imx-scu-irq.c | 32
-rw-r--r-- drivers/firmware/imx/imx-scu.c | 11
-rw-r--r-- drivers/firmware/ti_sci.c | 155
-rw-r--r-- drivers/firmware/ti_sci.h | 7
-rw-r--r-- drivers/firmware/xilinx/Makefile | 2
-rw-r--r-- drivers/firmware/xilinx/zynqmp-debug.c | 13
-rw-r--r-- drivers/firmware/xilinx/zynqmp-ufs.c | 118
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 160
-rw-r--r-- drivers/gpio/gpio-aspeed.c | 5
-rw-r--r-- drivers/gpio/gpio-mxc.c | 12
-rw-r--r-- drivers/gpio/gpio-pxa.c | 12
-rw-r--r-- drivers/gpio/gpio-sa1100.c | 12
-rw-r--r-- drivers/gpu/drm/nova/driver.rs | 4
-rw-r--r-- drivers/gpu/drm/nova/file.rs | 2
-rw-r--r-- drivers/gpu/drm/tyr/driver.rs | 4
-rw-r--r-- drivers/gpu/nova-core/driver.rs | 50
-rw-r--r-- drivers/hv/vmbus_drv.c | 14
-rw-r--r-- drivers/iio/dac/ad3530r.c | 3
-rw-r--r-- drivers/iio/temperature/mlx90614.c | 5
-rw-r--r-- drivers/iommu/amd/amd_iommu_types.h | 1
-rw-r--r-- drivers/iommu/amd/init.c | 25
-rw-r--r-- drivers/iommu/intel/iommu.c | 12
-rw-r--r-- drivers/irqchip/exynos-combiner.c | 14
-rw-r--r-- drivers/irqchip/irq-apple-aic.c | 7
-rw-r--r-- drivers/irqchip/irq-armada-370-xp.c | 12
-rw-r--r-- drivers/irqchip/irq-bcm7038-l1.c | 12
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 12
-rw-r--r-- drivers/irqchip/irq-gic.c | 3
-rw-r--r-- drivers/irqchip/irq-i8259.c | 12
-rw-r--r-- drivers/irqchip/irq-imx-gpcv2.c | 16
-rw-r--r-- drivers/irqchip/irq-loongson-eiointc.c | 12
-rw-r--r-- drivers/irqchip/irq-loongson-htpic.c | 10
-rw-r--r-- drivers/irqchip/irq-loongson-htvec.c | 12
-rw-r--r-- drivers/irqchip/irq-loongson-pch-lpc.c | 12
-rw-r--r-- drivers/irqchip/irq-loongson-pch-pic.c | 12
-rw-r--r-- drivers/irqchip/irq-mchp-eic.c | 12
-rw-r--r-- drivers/irqchip/irq-mst-intc.c | 12
-rw-r--r-- drivers/irqchip/irq-mtk-cirq.c | 12
-rw-r--r-- drivers/irqchip/irq-renesas-rzg2l.c | 12
-rw-r--r-- drivers/irqchip/irq-sa11x0.c | 12
-rw-r--r-- drivers/irqchip/irq-sifive-plic.c | 12
-rw-r--r-- drivers/irqchip/irq-sun6i-r.c | 18
-rw-r--r-- drivers/irqchip/irq-tegra.c | 12
-rw-r--r-- drivers/irqchip/irq-vic.c | 12
-rw-r--r-- drivers/leds/trigger/ledtrig-cpu.c | 14
-rw-r--r-- drivers/macintosh/mac_hid.c | 3
-rw-r--r-- drivers/macintosh/via-pmu-backlight.c | 2
-rw-r--r-- drivers/macintosh/via-pmu.c | 12
-rw-r--r-- drivers/media/radio/si470x/radio-si470x-i2c.c | 2
-rw-r--r-- drivers/media/usb/dvb-usb-v2/lmedm04.c | 12
-rw-r--r-- drivers/memory/renesas-rpc-if.c | 58
-rw-r--r-- drivers/memory/tegra/tegra124-emc.c | 140
-rw-r--r-- drivers/memory/tegra/tegra186-emc.c | 35
-rw-r--r-- drivers/memory/tegra/tegra20-emc.c | 150
-rw-r--r-- drivers/memory/tegra/tegra30-emc.c | 119
-rw-r--r-- drivers/message/fusion/mptbase.c | 7
-rw-r--r-- drivers/misc/Kconfig | 2
-rw-r--r-- drivers/nvdimm/Kconfig | 19
-rw-r--r-- drivers/nvdimm/Makefile | 1
-rw-r--r-- drivers/nvdimm/ramdax.c | 282
-rw-r--r-- drivers/nvdimm/region.c | 2
-rw-r--r-- drivers/nvdimm/region_devs.c | 2
-rw-r--r-- drivers/nvdimm/security.c | 4
-rw-r--r-- drivers/nvme/common/auth.c | 4
-rw-r--r-- drivers/parisc/ccio-dma.c | 54
-rw-r--r-- drivers/parisc/gsc.c | 4
-rw-r--r-- drivers/parisc/iommu-helpers.h | 10
-rw-r--r-- drivers/parisc/sba_iommu.c | 54
-rw-r--r-- drivers/pci/Kconfig | 18
-rw-r--r-- drivers/pci/Makefile | 2
-rw-r--r-- drivers/pci/bus.c | 39
-rw-r--r-- drivers/pci/doe.c | 2
-rw-r--r-- drivers/pci/ide.c | 815
-rw-r--r-- drivers/pci/pci-sysfs.c | 4
-rw-r--r-- drivers/pci/pci.h | 21
-rw-r--r-- drivers/pci/pcie/aer.c | 2
-rw-r--r-- drivers/pci/probe.c | 31
-rw-r--r-- drivers/pci/remove.c | 7
-rw-r--r-- drivers/pci/search.c | 62
-rw-r--r-- drivers/pci/tsm.c | 900
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-ma35.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-zynqmp.c | 7
-rw-r--r-- drivers/power/reset/sc27xx-poweroff.c | 10
-rw-r--r-- drivers/pwm/pwm_th1520.rs | 4
-rw-r--r-- drivers/remoteproc/imx_dsp_rproc.c | 404
-rw-r--r-- drivers/remoteproc/imx_rproc.c | 238
-rw-r--r-- drivers/remoteproc/imx_rproc.h | 16
-rw-r--r-- drivers/remoteproc/mtk_scp.c | 65
-rw-r--r-- drivers/remoteproc/omap_remoteproc.c | 3
-rw-r--r-- drivers/remoteproc/qcom_q6v5_adsp.c | 29
-rw-r--r-- drivers/remoteproc/qcom_q6v5_mss.c | 60
-rw-r--r-- drivers/remoteproc/qcom_q6v5_pas.c | 80
-rw-r--r-- drivers/remoteproc/qcom_q6v5_wcss.c | 40
-rw-r--r-- drivers/remoteproc/qcom_wcnss.c | 27
-rw-r--r-- drivers/remoteproc/rcar_rproc.c | 38
-rw-r--r-- drivers/remoteproc/remoteproc_core.c | 31
-rw-r--r-- drivers/remoteproc/st_remoteproc.c | 44
-rw-r--r-- drivers/remoteproc/stm32_rproc.c | 46
-rw-r--r-- drivers/remoteproc/ti_k3_common.c | 28
-rw-r--r-- drivers/remoteproc/xlnx_r5_remoteproc.c | 53
-rw-r--r-- drivers/reset/Kconfig | 13
-rw-r--r-- drivers/reset/Makefile | 1
-rw-r--r-- drivers/reset/core.c | 124
-rw-r--r-- drivers/reset/reset-eic7700.c | 429
-rw-r--r-- drivers/reset/reset-rzg2l-usbphy-ctrl.c | 60
-rw-r--r-- drivers/reset/reset-th1520.c | 835
-rw-r--r-- drivers/rpmsg/qcom_glink_native.c | 35
-rw-r--r-- drivers/scsi/aacraid/linit.c | 2
-rw-r--r-- drivers/scsi/advansys.c | 3
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 3
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 3
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 2
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic_res.c | 1
-rw-r--r-- drivers/scsi/hosts.c | 19
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 3
-rw-r--r-- drivers/scsi/isci/task.h | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 36
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 249
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 25
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 21
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 79
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.h | 17
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 15
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 2
-rw-r--r-- drivers/scsi/qla1280.c | 35
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 32
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 39
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 1777
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 112
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 17
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 4
-rw-r--r-- drivers/scsi/scsi.c | 12
-rw-r--r-- drivers/scsi/scsi_debug.c | 132
-rw-r--r-- drivers/scsi/scsi_error.c | 3
-rw-r--r-- drivers/scsi/scsi_lib.c | 104
-rw-r--r-- drivers/scsi/scsi_logging.c | 21
-rw-r--r-- drivers/scsi/scsi_pm.c | 1
-rw-r--r-- drivers/scsi/scsi_priv.h | 1
-rw-r--r-- drivers/scsi/scsi_scan.c | 74
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 79
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 5
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 2
-rw-r--r-- drivers/scsi/sd.c | 34
-rw-r--r-- drivers/scsi/sim710.c | 2
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 49
-rw-r--r-- drivers/scsi/st.c | 89
-rw-r--r-- drivers/scsi/stex.c | 1
-rw-r--r-- drivers/sh/clk/core.c | 10
-rw-r--r-- drivers/sh/intc/core.c | 12
-rw-r--r-- drivers/soc/amlogic/meson-canvas.c | 12
-rw-r--r-- drivers/soc/amlogic/meson-gx-socinfo.c | 6
-rw-r--r-- drivers/soc/apple/mailbox.c | 15
-rw-r--r-- drivers/soc/apple/sart.c | 13
-rw-r--r-- drivers/soc/bcm/brcmstb/biuctrl.c | 12
-rw-r--r-- drivers/soc/fsl/qbman/qman.c | 2
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_stash.c | 2
-rw-r--r-- drivers/soc/mediatek/mtk-socinfo.c | 3
-rw-r--r-- drivers/soc/microchip/Kconfig | 12
-rw-r--r-- drivers/soc/microchip/Makefile | 1
-rw-r--r-- drivers/soc/microchip/mpfs-control-scb.c | 38
-rw-r--r-- drivers/soc/microchip/mpfs-mss-top-sysreg.c | 44
-rw-r--r-- drivers/soc/qcom/ice.c | 81
-rw-r--r-- drivers/soc/qcom/llcc-qcom.c | 373
-rw-r--r-- drivers/soc/qcom/mdt_loader.c | 52
-rw-r--r-- drivers/soc/qcom/ocmem.c | 2
-rw-r--r-- drivers/soc/qcom/pmic_glink.c | 9
-rw-r--r-- drivers/soc/qcom/qcom-pbs.c | 2
-rw-r--r-- drivers/soc/qcom/qcom_gsbi.c | 8
-rw-r--r-- drivers/soc/qcom/qcom_pd_mapper.c | 10
-rw-r--r-- drivers/soc/qcom/smem.c | 33
-rw-r--r-- drivers/soc/qcom/socinfo.c | 102
-rw-r--r-- drivers/soc/qcom/ubwc_config.c | 24
-rw-r--r-- drivers/soc/renesas/r9a08g045-sysc.c | 69
-rw-r--r-- drivers/soc/renesas/r9a09g047-sys.c | 79
-rw-r--r-- drivers/soc/renesas/r9a09g056-sys.c | 69
-rw-r--r-- drivers/soc/renesas/r9a09g057-sys.c | 101
-rw-r--r-- drivers/soc/renesas/rcar-rst.c | 3
-rw-r--r-- drivers/soc/renesas/renesas-soc.c | 4
-rw-r--r-- drivers/soc/renesas/rz-sysc.c | 5
-rw-r--r-- drivers/soc/renesas/rz-sysc.h | 4
-rw-r--r-- drivers/soc/rockchip/grf.c | 15
-rw-r--r-- drivers/soc/samsung/Makefile | 3
-rw-r--r-- drivers/soc/samsung/exynos-chipid.c | 18
-rw-r--r-- drivers/soc/samsung/exynos-pmu.c | 147
-rw-r--r-- drivers/soc/samsung/exynos-pmu.h | 37
-rw-r--r-- drivers/soc/samsung/gs101-pmu.c | 446
-rw-r--r-- drivers/soc/tegra/cbb/tegra194-cbb.c | 2
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra.c | 2
-rw-r--r-- drivers/soc/tegra/fuse/speedo-tegra210.c | 63
-rw-r--r-- drivers/soc/tegra/pmc.c | 38
-rw-r--r-- drivers/soc/xilinx/xlnx_event_manager.c | 8
-rw-r--r-- drivers/soc/xilinx/zynqmp_power.c | 10
-rw-r--r-- drivers/target/sbp/sbp_target.c | 8
-rw-r--r-- drivers/target/target_core_configfs.c | 38
-rw-r--r-- drivers/target/target_core_device.c | 24
-rw-r--r-- drivers/target/target_core_fabric_configfs.c | 2
-rw-r--r-- drivers/target/target_core_file.c | 4
-rw-r--r-- drivers/target/target_core_iblock.c | 9
-rw-r--r-- drivers/target/target_core_internal.h | 1
-rw-r--r-- drivers/target/target_core_sbc.c | 51
-rw-r--r-- drivers/target/target_core_spc.c | 49
-rw-r--r-- drivers/target/target_core_stat.c | 268
-rw-r--r-- drivers/target/target_core_tpg.c | 23
-rw-r--r-- drivers/target/target_core_transport.c | 26
-rw-r--r-- drivers/target/target_core_xcopy.c | 2
-rw-r--r-- drivers/target/tcm_fc/tfc_conf.c | 2
-rw-r--r-- drivers/thermal/intel/intel_hfi.c | 12
-rw-r--r-- drivers/ufs/core/Makefile | 1
-rw-r--r-- drivers/ufs/core/ufs-mcq.c | 62
-rw-r--r-- drivers/ufs/core/ufs-rpmb.c | 254
-rw-r--r-- drivers/ufs/core/ufs-sysfs.c | 3
-rw-r--r-- drivers/ufs/core/ufs_bsg.c | 2
-rw-r--r-- drivers/ufs/core/ufs_trace.h | 1
-rw-r--r-- drivers/ufs/core/ufs_trace_types.h | 1
-rw-r--r-- drivers/ufs/core/ufshcd-crypto.h | 18
-rw-r--r-- drivers/ufs/core/ufshcd-priv.h | 54
-rw-r--r-- drivers/ufs/core/ufshcd.c | 934
-rw-r--r-- drivers/ufs/host/Kconfig | 13
-rw-r--r-- drivers/ufs/host/Makefile | 1
-rw-r--r-- drivers/ufs/host/ti-j721e-ufs.c | 37
-rw-r--r-- drivers/ufs/host/ufs-amd-versal2.c | 564
-rw-r--r-- drivers/ufs/host/ufs-mediatek.c | 130
-rw-r--r-- drivers/ufs/host/ufs-mediatek.h | 4
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 3
-rw-r--r-- drivers/ufs/host/ufs-rockchip.c | 20
-rw-r--r-- drivers/ufs/host/ufshcd-dwc.h | 46
-rw-r--r-- drivers/video/fbdev/gbefb.c | 5
-rw-r--r-- drivers/video/fbdev/gxt4500.c | 2
-rw-r--r-- drivers/video/fbdev/i810/i810_main.c | 46
-rw-r--r-- drivers/video/fbdev/pxafb.c | 12
-rw-r--r-- drivers/video/fbdev/ssd1307fb.c | 4
-rw-r--r-- drivers/video/fbdev/tcx.c | 2
-rw-r--r-- drivers/video/fbdev/tridentfb.c | 4
-rw-r--r-- drivers/video/fbdev/vesafb.c | 29
-rw-r--r-- drivers/video/fbdev/vga16fb.c | 21
-rw-r--r-- drivers/virt/Kconfig | 4
-rw-r--r-- drivers/virt/coco/Kconfig | 5
-rw-r--r-- drivers/virt/coco/Makefile | 1
-rw-r--r-- drivers/virt/coco/tsm-core.c | 163
-rw-r--r-- drivers/watchdog/Kconfig | 12
-rw-r--r-- drivers/watchdog/Makefile | 1
-rw-r--r-- drivers/watchdog/aspeed_wdt.c | 30
-rw-r--r-- drivers/watchdog/diag288_wdt.c | 6
-rw-r--r-- drivers/watchdog/loongson1_wdt.c | 89
-rw-r--r-- drivers/watchdog/renesas_wwdt.c | 163
-rw-r--r-- drivers/watchdog/starfive-wdt.c | 4
-rw-r--r-- drivers/watchdog/via_wdt.c | 1
-rw-r--r-- drivers/watchdog/wdat_wdt.c | 64
-rw-r--r-- drivers/xen/grant-dma-ops.c | 20
-rw-r--r-- drivers/xen/grant-table.c | 2
-rw-r--r-- drivers/xen/swiotlb-xen.c | 63
-rw-r--r-- drivers/xen/xen-acpi-processor.c | 12
-rw-r--r-- drivers/xen/xenbus/xenbus_xs.c | 16
337 files changed, 14957 insertions(+), 3602 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 20eb17596b89..b9f70e01f269 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -160,7 +160,7 @@ obj-$(CONFIG_RPMSG) += rpmsg/
obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
-obj-$(CONFIG_VIRT_DRIVERS) += virt/
+obj-y += virt/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 626908491d8f..0dc767392a6c 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -44,6 +44,7 @@
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>
+#include <linux/vmcore_info.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -864,6 +865,40 @@ int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
+static void ghes_log_hwerr(int sev, guid_t *sec_type)
+{
+ if (sev != CPER_SEV_RECOVERABLE)
+ return;
+
+ if (guid_equal(sec_type, &CPER_SEC_PROC_ARM) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_GENERIC) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+ hwerr_log_error_type(HWERR_RECOV_CPU);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+ hwerr_log_error_type(HWERR_RECOV_CXL);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PCIE) ||
+ guid_equal(sec_type, &CPER_SEC_PCI_X_BUS)) {
+ hwerr_log_error_type(HWERR_RECOV_PCI);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ hwerr_log_error_type(HWERR_RECOV_MEMORY);
+ return;
+ }
+
+ hwerr_log_error_type(HWERR_RECOV_OTHERS);
+}
+
static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -885,6 +920,7 @@ static void ghes_do_proc(struct ghes *ghes,
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
+ ghes_log_hwerr(sev, sec_type);
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index e4560b33b8ad..bed7dc85612e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -761,7 +761,7 @@ static int acpi_pci_link_resume(struct acpi_pci_link *link)
return 0;
}
-static void irqrouter_resume(void)
+static void irqrouter_resume(void *data)
{
struct acpi_pci_link *link;
@@ -888,10 +888,14 @@ static int __init acpi_irq_balance_set(char *str)
__setup("acpi_irq_balance", acpi_irq_balance_set);
-static struct syscore_ops irqrouter_syscore_ops = {
+static const struct syscore_ops irqrouter_syscore_ops = {
.resume = irqrouter_resume,
};
+static struct syscore irqrouter_syscore = {
+ .ops = &irqrouter_syscore_ops,
+};
+
void __init acpi_pci_link_init(void)
{
if (acpi_noirq)
@@ -904,6 +908,6 @@ void __init acpi_pci_link_init(void)
else
acpi_irq_balance = 0;
}
- register_syscore_ops(&irqrouter_syscore_ops);
+ register_syscore(&irqrouter_syscore);
acpi_scan_add_handler(&pci_link_handler);
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 68943b98333d..66ec81e306d4 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -884,13 +884,13 @@ bool acpi_s2idle_wakeup(void)
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
-static int acpi_save_bm_rld(void)
+static int acpi_save_bm_rld(void *data)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
return 0;
}
-static void acpi_restore_bm_rld(void)
+static void acpi_restore_bm_rld(void *data)
{
u32 resumed_bm_rld = 0;
@@ -901,14 +901,18 @@ static void acpi_restore_bm_rld(void)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
-static struct syscore_ops acpi_sleep_syscore_ops = {
+static const struct syscore_ops acpi_sleep_syscore_ops = {
.suspend = acpi_save_bm_rld,
.resume = acpi_restore_bm_rld,
};
+static struct syscore acpi_sleep_syscore = {
+ .ops = &acpi_sleep_syscore_ops,
+};
+
static void acpi_sleep_syscore_init(void)
{
- register_syscore_ops(&acpi_sleep_syscore_ops);
+ register_syscore(&acpi_sleep_syscore);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index c0e8b765522d..f23c3ed01810 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -144,6 +144,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
if (!dev)
return -EPROBE_DEFER;
ahb = dev_get_drvdata(dev);
+ put_device(dev);
val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
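[Editor's note: the one-line tegra-ahb change above fixes a device reference leak. Lookup helpers such as driver_find_device() and bus_find_device() return their result with an elevated refcount, which the caller must drop with put_device() once it has extracted what it needs. A minimal sketch of the pattern, with hypothetical names:]

        #include <linux/device/bus.h>
        #include <linux/err.h>
        #include <linux/of.h>
        #include <linux/platform_device.h>

        /* Hypothetical helper showing the lookup/put pattern. */
        static void *my_get_drvdata(struct device_node *np)
        {
                struct device *dev;
                void *drvdata;

                /* The lookup takes a reference on the returned device. */
                dev = bus_find_device_by_of_node(&platform_bus_type, np);
                if (!dev)
                        return ERR_PTR(-EPROBE_DEFER);

                drvdata = dev_get_drvdata(dev);

                /* Drop the lookup's reference once done with dev itself;
                 * drvdata stays valid only as long as the driver is bound. */
                put_device(dev);
                return drvdata;
        }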
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index e5237e9ec552..ac981614544e 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -19,6 +19,7 @@ use kernel::{
cred::Credential,
error::Error,
fs::file::{self, File},
+ id_pool::IdPool,
list::{List, ListArc, ListArcField, ListLinks},
mm,
prelude::*,
@@ -394,6 +395,8 @@ kernel::list::impl_list_item! {
struct ProcessNodeRefs {
/// Used to look up nodes using the 32-bit id that this process knows it by.
by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to quickly find unused ids in `by_handle`.
+ handle_is_present: IdPool,
/// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
/// the underlying `Node` struct as returned by `Node::global_id`.
by_node: RBTree<usize, u32>,
@@ -408,6 +411,7 @@ impl ProcessNodeRefs {
fn new() -> Self {
Self {
by_handle: RBTree::new(),
+ handle_is_present: IdPool::new(),
by_node: RBTree::new(),
freeze_listeners: RBTree::new(),
}
@@ -802,7 +806,7 @@ impl Process {
pub(crate) fn insert_or_update_handle(
self: ArcBorrow<'_, Process>,
node_ref: NodeRef,
- is_mananger: bool,
+ is_manager: bool,
) -> Result<u32> {
{
let mut refs = self.node_refs.lock();
@@ -821,7 +825,33 @@ impl Process {
let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
let info = UniqueArc::new_uninit(GFP_KERNEL)?;
- let mut refs = self.node_refs.lock();
+ let mut refs_lock = self.node_refs.lock();
+ let mut refs = &mut *refs_lock;
+
+ let (unused_id, by_handle_slot) = loop {
+ // ID 0 may only be used by the manager.
+ let start = if is_manager { 0 } else { 1 };
+
+ if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+ match refs.by_handle.entry(res.as_u32()) {
+ rbtree::Entry::Vacant(entry) => break (res, entry),
+ rbtree::Entry::Occupied(_) => {
+ pr_err!("Detected mismatch between handle_is_present and by_handle");
+ res.acquire();
+ kernel::warn_on!(true);
+ return Err(EINVAL);
+ }
+ }
+ }
+
+ let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+ drop(refs_lock);
+ let resizer = grow_request.realloc(GFP_KERNEL)?;
+ refs_lock = self.node_refs.lock();
+ refs = &mut *refs_lock;
+ refs.handle_is_present.grow(resizer);
+ };
+ let handle = unused_id.as_u32();
// Do a lookup again as node may have been inserted before the lock was reacquired.
if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
@@ -831,20 +861,9 @@ impl Process {
return Ok(handle);
}
- // Find id.
- let mut target: u32 = if is_mananger { 0 } else { 1 };
- for handle in refs.by_handle.keys() {
- if *handle > target {
- break;
- }
- if *handle == target {
- target = target.checked_add(1).ok_or(ENOMEM)?;
- }
- }
-
let gid = node_ref.node.global_id();
let (info_proc, info_node) = {
- let info_init = NodeRefInfo::new(node_ref, target, self.into());
+ let info_init = NodeRefInfo::new(node_ref, handle, self.into());
match info.pin_init_with(info_init) {
Ok(info) => ListArc::pair_from_pin_unique(info),
// error is infallible
@@ -865,9 +884,10 @@ impl Process {
// `info_node` into the right node's `refs` list.
unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
- refs.by_node.insert(reserve1.into_node(gid, target));
- refs.by_handle.insert(reserve2.into_node(target, info_proc));
- Ok(target)
+ refs.by_node.insert(reserve1.into_node(gid, handle));
+ by_handle_slot.insert(info_proc, reserve2);
+ unused_id.acquire();
+ Ok(handle)
}
pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
@@ -932,6 +952,16 @@ impl Process {
let id = info.node_ref().node.global_id();
refs.by_handle.remove(&handle);
refs.by_node.remove(&id);
+ refs.handle_is_present.release_id(handle as usize);
+
+ if let Some(shrink) = refs.handle_is_present.shrink_request() {
+ drop(refs);
+ // This intentionally ignores allocation failures.
+ if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+ refs = self.node_refs.lock();
+ refs.handle_is_present.shrink(new_bitmap);
+ }
+ }
}
} else {
// All refs are cleared in process exit, so this warning is expected in that case.
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index f2140fc06ba0..15e18d50dcc6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -246,6 +246,73 @@ void ata_acpi_bind_dev(struct ata_device *dev)
}
/**
+ * ata_acpi_dev_manage_restart - check whether the disk should be stopped
+ * (spun down) on system restart
+ * @dev: target ATA device
+ *
+ * RETURNS:
+ * true if the disk should be stopped, otherwise false.
+ */
+bool ata_acpi_dev_manage_restart(struct ata_device *dev)
+{
+ struct device *tdev;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (dev->link->ap->flags & ATA_FLAG_ACPI_SATA)
+ tdev = &dev->tdev;
+ else
+ tdev = &dev->link->ap->tdev;
+
+ if (!is_acpi_device_node(tdev->fwnode))
+ return false;
+ return acpi_bus_power_manageable(ACPI_HANDLE(tdev));
+}
+
+/**
+ * ata_acpi_port_power_on - set the power state of the ata port to D0
+ * @ap: target ATA port
+ *
+ * This function is called at the beginning of ata_port_probe().
+ */
+void ata_acpi_port_power_on(struct ata_port *ap)
+{
+ acpi_handle handle;
+ int i;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ struct ata_device *dev = &ap->link.device[i];
+
+ if (!is_acpi_device_node(dev->tdev.fwnode))
+ continue;
+ handle = ACPI_HANDLE(&dev->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ continue;
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_dev_err(dev,
+ "acpi: failed to set power state to D0\n");
+ }
+ return;
+ }
+
+ if (!is_acpi_device_node(ap->tdev.fwnode))
+ return;
+ handle = ACPI_HANDLE(&ap->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ return;
+
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_port_err(ap, "acpi: failed to set power state to D0\n");
+}
+
+/**
* ata_acpi_dissociate - dissociate ATA host from ACPI objects
* @host: target ATA host
*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 462217935558..0b24bd169d61 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3146,6 +3146,10 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_8191)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_8191,
+ dev->max_sectors);
+
if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -3998,6 +4002,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_NO_DMA_LOG] = "nodmalog",
[__ATA_QUIRK_NOTRIM] = "notrim",
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
+ [__ATA_QUIRK_MAX_SEC_8191] = "maxsec8191",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
[__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
@@ -4104,6 +4109,12 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 },
{ "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 },
+ /*
+ * These devices time out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=220693
+ */
+ { "DELLBOSS VD", "MV.R00-0", ATA_QUIRK_MAX_SEC_8191 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -5915,6 +5926,8 @@ void ata_port_probe(struct ata_port *ap)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
+ ata_acpi_port_power_on(ap);
+
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 434774e71fe6..721d3f270c8e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1102,6 +1102,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
*/
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
+ sdev->manage_restart = ata_acpi_dev_manage_restart(dev);
sdev->force_runtime_start_on_system_start = 1;
}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index e5b977a8d3e1..0e7ecac73680 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -130,6 +130,8 @@ extern void ata_acpi_on_disable(struct ata_device *dev);
extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
extern void ata_acpi_bind_port(struct ata_port *ap);
extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern void ata_acpi_port_power_on(struct ata_port *ap);
+extern bool ata_acpi_dev_manage_restart(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
@@ -140,6 +142,8 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
pm_message_t state) { }
static inline void ata_acpi_bind_port(struct ata_port *ap) {}
static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+static inline void ata_acpi_port_power_on(struct ata_port *ap) {}
+static inline bool ata_acpi_dev_manage_restart(struct ata_device *dev) { return 0; }
#endif
/* libata-scsi.c */
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e1eff05bea4a..84ec92bff642 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -823,12 +823,106 @@ void remove_cpu_topology(unsigned int cpu)
clear_cpu_topology(cpu);
}
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+ unsigned int thread_num;
+ int core_id;
+};
+
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ /*
+ * if the PPTT doesn't have thread information, check for architecture
+ * specific fallback if available
+ */
+ if (is_threaded < 0)
+ is_threaded = arch_cpu_is_threaded();
+
+ return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
__weak int __init parse_acpi_topology(void)
{
+ unsigned int max_smt_thread_num = 1;
+ struct cpu_smt_info *entry;
+ struct xarray hetero_cpu;
+ unsigned long hetero_id;
+ int cpu, topology_id;
+
+ if (acpi_disabled)
+ return 0;
+
+ xa_init(&hetero_cpu);
+
+ for_each_possible_cpu(cpu) {
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (acpi_cpu_is_threaded(cpu)) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+
+ /*
+ * In the PPTT, CPUs below a node with the 'identical
+ * implementation' flag have the same number of threads.
+ * Count the number of threads for only one CPU (i.e.
+ * one core_id) among those with the same hetero_id.
+ * See the comment of find_acpi_cpu_topology_hetero_id()
+ * for more details.
+ *
+ * One entry is created for each node having:
+ * - the 'identical implementation' flag
+ * - its parent not having the flag
+ */
+ hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+ entry = xa_load(&hetero_cpu, hetero_id);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ WARN_ON_ONCE(!entry);
+
+ if (entry) {
+ entry->core_id = topology_id;
+ entry->thread_num = 1;
+ xa_store(&hetero_cpu, hetero_id,
+ entry, GFP_KERNEL);
+ }
+ } else if (entry->core_id == topology_id) {
+ entry->thread_num++;
+ }
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_cluster(cpu);
+ cpu_topology[cpu].cluster_id = topology_id;
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+ }
+
+ /*
+ * This is a short loop since the number of XArray elements is the
+ * number of heterogeneous CPU clusters. On a homogeneous system
+ * there's only one entry in the XArray.
+ */
+ xa_for_each(&hetero_cpu, hetero_id, entry) {
+ max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+ xa_erase(&hetero_cpu, hetero_id);
+ kfree(entry);
+ }
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+ xa_destroy(&hetero_cpu);
return 0;
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
int cpu, ret;
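[Editor's note: the SMT counting above uses a plain XArray keyed by hetero_id as temporary scratch storage. Stripped to its bones, the count-per-key pattern looks like this — a sketch with hypothetical my_count/count_key names, assuming process context:]

        #include <linux/slab.h>
        #include <linux/xarray.h>

        struct my_count {
                unsigned int n;
        };

        /* Bump the counter stored under @key, allocating it on first use. */
        static void count_key(struct xarray *xa, unsigned long key)
        {
                struct my_count *e = xa_load(xa, key);

                if (!e) {
                        e = kzalloc(sizeof(*e), GFP_KERNEL);
                        if (WARN_ON_ONCE(!e))
                                return;
                        e->n = 1;
                        xa_store(xa, key, e, GFP_KERNEL);
                } else {
                        e->n++;
                }
        }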
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 86fa7fbb3548..430cbefbc97f 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -85,6 +85,18 @@ struct driver_private {
};
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+#ifdef CONFIG_RUST
+/**
+ * struct driver_type - Representation of a Rust driver type.
+ */
+struct driver_type {
+ /**
+ * @id: Representation of core::any::TypeId.
+ */
+ u8 id[16];
+} __packed;
+#endif
+
/**
* struct device_private - structure to hold the private to the driver core portions of the device structure.
*
@@ -100,6 +112,7 @@ struct driver_private {
* @async_driver - pointer to device driver awaiting probe via async_probe
* @device - pointer back to the struct device that this structure is
* associated with.
+ * @driver_type - The type of the bound Rust driver.
* @dead - This device is currently either in the process of or has been
* removed from the system. Any asynchronous events scheduled for this
* device should exit without taking any action.
@@ -116,6 +129,9 @@ struct device_private {
const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
+#ifdef CONFIG_RUST
+ struct driver_type driver_type;
+#endif
u8 dead:1;
};
#define to_device_private_parent(obj) \
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 5e75e1bce551..9eb7771706f0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -334,6 +334,19 @@ static struct device *next_device(struct klist_iter *i)
return dev;
}
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
@@ -414,6 +427,31 @@ struct device *bus_find_device(const struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!sp)
+ return NULL;
+
+ klist_iter_init_node(&sp->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while ((dev = prev_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ klist_iter_exit(&i);
+ subsys_put(sp);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device_reverse);
+
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -533,8 +571,7 @@ void bus_probe_device(struct device *dev)
if (!sp)
return;
- if (sp->drivers_autoprobe)
- device_initial_probe(dev);
+ device_initial_probe(dev);
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
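[Editor's note: bus_find_device_reverse() is the mirror image of bus_find_device(): it walks the bus's device klist backwards via klist_prev(), so the most recently registered match is found first, again returned with a reference held. A hedged usage sketch using the stock device_match_name() helper:]

        /* Illustrative only: find the newest platform device named "my-dev". */
        struct device *dev;

        dev = bus_find_device_reverse(&platform_bus_type, NULL, "my-dev",
                                      device_match_name);
        if (dev) {
                /* ... use the device ... */
                put_device(dev);        /* drop the lookup's reference */
        }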
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f69dc9c85954..40de2f51a1b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -4138,7 +4138,7 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
- device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ device_link_wq = alloc_workqueue("device_link_wq", WQ_PERCPU, 0);
if (!device_link_wq)
goto wq_err;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index fa0a2eef93ac..c6c57b6f61c6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -300,13 +300,30 @@ static ssize_t print_cpus_isolated(struct device *dev,
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+static ssize_t housekeeping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *hk_mask;
+
+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
+
+ if (housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(hk_mask));
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(housekeeping);
+
#ifdef CONFIG_NO_HZ_FULL
-static ssize_t print_cpus_nohz_full(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nohz_full_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+ if (cpumask_available(tick_nohz_full_mask))
+ return sysfs_emit(buf, "%*pbl\n",
+ cpumask_pr_args(tick_nohz_full_mask));
+ return sysfs_emit(buf, "\n");
}
-static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+static DEVICE_ATTR_RO(nohz_full);
#endif
#ifdef CONFIG_CRASH_HOTPLUG
@@ -505,6 +522,7 @@ static struct attribute *cpu_root_attrs[] = {
&dev_attr_offline.attr,
&dev_attr_enabled.attr,
&dev_attr_isolated.attr,
+ &dev_attr_housekeeping.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
#endif
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 13ab98e033ea..349f31bedfa1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -193,7 +193,7 @@ void driver_deferred_probe_trigger(void)
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
- queue_work(system_unbound_wq, &deferred_probe_work);
+ queue_work(system_dfl_wq, &deferred_probe_work);
}
/**
@@ -1077,7 +1077,15 @@ EXPORT_SYMBOL_GPL(device_attach);
void device_initial_probe(struct device *dev)
{
- __device_attach(dev, true);
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (!sp)
+ return;
+
+ if (sp->drivers_autoprobe)
+ __device_attach(dev, true);
+
+ subsys_put(sp);
}
/*
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index c948c88d3956..f54db6d138ab 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1222,13 +1222,6 @@ static void devm_percpu_release(struct device *dev, void *pdata)
free_percpu(p);
}
-static int devm_percpu_match(struct device *dev, void *data, void *p)
-{
- struct devres *devr = container_of(data, struct devres, data);
-
- return *(void **)devr->data == p;
-}
-
/**
* __devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
@@ -1264,21 +1257,3 @@ void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
-
-/**
- * devm_free_percpu - Resource-managed free_percpu
- * @dev: Device this memory belongs to
- * @pdata: Per-cpu memory to free
- *
- * Free memory allocated with devm_alloc_percpu().
- */
-void devm_free_percpu(struct device *dev, void __percpu *pdata)
-{
- /*
- * Use devres_release() to prevent memory leakage as
- * devm_free_pages() does.
- */
- WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
- (void *)(__force unsigned long)pdata));
-}
-EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index bee3050a20d9..4ebdca9e4da4 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -1576,16 +1576,20 @@ static int fw_pm_notify(struct notifier_block *notify_block,
}
/* stop caching firmware once syscore_suspend is reached */
-static int fw_suspend(void)
+static int fw_suspend(void *data)
{
fw_cache.state = FW_LOADER_NO_CACHE;
return 0;
}
-static struct syscore_ops fw_syscore_ops = {
+static const struct syscore_ops fw_syscore_ops = {
.suspend = fw_suspend,
};
+static struct syscore fw_syscore = {
+ .ops = &fw_syscore_ops,
+};
+
static int __init register_fw_pm_ops(void)
{
int ret;
@@ -1601,14 +1605,14 @@ static int __init register_fw_pm_ops(void)
if (ret)
return ret;
- register_syscore_ops(&fw_syscore_ops);
+ register_syscore(&fw_syscore);
return ret;
}
static inline void unregister_fw_pm_ops(void)
{
- unregister_syscore_ops(&fw_syscore_ops);
+ unregister_syscore(&fw_syscore);
unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index add0b9b75edd..92e91050f96a 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -47,7 +47,10 @@ static ssize_t timeout_show(const struct class *class, const struct class_attrib
static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+ int tmp_loading_timeout;
+
+ if (kstrtoint(buf, 10, &tmp_loading_timeout))
+ return -EINVAL;
if (tmp_loading_timeout < 0)
tmp_loading_timeout = 0;
@@ -157,7 +160,10 @@ static ssize_t firmware_loading_store(struct device *dev,
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
+ int loading;
+
+ if (kstrtoint(buf, 10, &loading))
+ return -EINVAL;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
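[Editor's note: both stores above previously used simple_strtol(), which ignores trailing garbage and silently yields 0 for non-numeric input, so a parse failure could never reach the writer; kstrtoint() rejects malformed strings outright. The conversion idiom in isolation — an illustrative prototype, not the real sysfs store signature:]

        /* kstrtoint() returns 0 on success or a negative errno
         * (-EINVAL, -ERANGE) on malformed or out-of-range input. */
        static ssize_t example_store(const char *buf, size_t count)
        {
                int val;

                if (kstrtoint(buf, 10, &val))
                        return -EINVAL;

                /* ... apply val ... */
                return count;
        }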
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 829270067d16..c3797b93c5f5 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -100,8 +100,10 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&fwlp->lock);
- if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
- ret = -ENODEV;
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -ENODEV;
+ }
fwlp->ops->cancel(fwlp->fw_upload);
mutex_unlock(&fwlp->lock);
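[Editor's note: the cancel_store() change turns a noted-but-ignored error into a real early return, remembering to drop the mutex on the new path. The scoped guard() helper from <linux/cleanup.h> makes such early returns leak-proof by construction; a hedged alternative shape, not what the patch actually does:]

        /* Sketch using a cleanup.h guard: the mutex is released
         * automatically on every return path, including -ENODEV. */
        guard(mutex)(&fwlp->lock);

        if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
                return -ENODEV;

        fwlp->ops->cancel(fwlp->fw_upload);
        return count;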
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 13db1f78d2ce..483adb796654 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -11,32 +11,32 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
-static LIST_HEAD(syscore_ops_list);
-static DEFINE_MUTEX(syscore_ops_lock);
+static LIST_HEAD(syscore_list);
+static DEFINE_MUTEX(syscore_lock);
/**
- * register_syscore_ops - Register a set of system core operations.
- * @ops: System core operations to register.
+ * register_syscore - Register a set of system core operations.
+ * @syscore: System core operations to register.
*/
-void register_syscore_ops(struct syscore_ops *ops)
+void register_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_add_tail(&ops->node, &syscore_ops_list);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_add_tail(&syscore->node, &syscore_list);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(register_syscore_ops);
+EXPORT_SYMBOL_GPL(register_syscore);
/**
- * unregister_syscore_ops - Unregister a set of system core operations.
- * @ops: System core operations to unregister.
+ * unregister_syscore - Unregister a set of system core operations.
+ * @syscore: System core operations to unregister.
*/
-void unregister_syscore_ops(struct syscore_ops *ops)
+void unregister_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_del(&ops->node);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_del(&syscore->node);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+EXPORT_SYMBOL_GPL(unregister_syscore);
#ifdef CONFIG_PM_SLEEP
/**
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(unregister_syscore_ops);
*/
int syscore_suspend(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
int ret = 0;
trace_suspend_resume(TPS("syscore_suspend"), 0, true);
@@ -59,25 +59,27 @@ int syscore_suspend(void)
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->suspend) {
- pm_pr_dbg("Calling %pS\n", ops->suspend);
- ret = ops->suspend();
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->suspend) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->suspend);
+ ret = syscore->ops->suspend(syscore->data);
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->suspend);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->suspend);
}
trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
- pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
+ pr_err("PM: System core suspend callback %pS failed.\n",
+ syscore->ops->suspend);
- list_for_each_entry_continue(ops, &syscore_ops_list, node)
- if (ops->resume)
- ops->resume();
+ list_for_each_entry_continue(syscore, &syscore_list, node)
+ if (syscore->ops->resume)
+ syscore->ops->resume(syscore->data);
return ret;
}
@@ -90,18 +92,19 @@ EXPORT_SYMBOL_GPL(syscore_suspend);
*/
void syscore_resume(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
trace_suspend_resume(TPS("syscore_resume"), 0, true);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core resume.\n");
- list_for_each_entry(ops, &syscore_ops_list, node)
- if (ops->resume) {
- pm_pr_dbg("Calling %pS\n", ops->resume);
- ops->resume();
+ list_for_each_entry(syscore, &syscore_list, node)
+ if (syscore->ops->resume) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->resume);
+ syscore->ops->resume(syscore->data);
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->resume);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->resume);
}
trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
@@ -113,16 +116,17 @@ EXPORT_SYMBOL_GPL(syscore_resume);
*/
void syscore_shutdown(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
- mutex_lock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->shutdown) {
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->shutdown) {
if (initcall_debug)
- pr_info("PM: Calling %pS\n", ops->shutdown);
- ops->shutdown();
+ pr_info("PM: Calling %pS\n",
+ syscore->ops->shutdown);
+ syscore->ops->shutdown(syscore->data);
}
- mutex_unlock(&syscore_ops_lock);
+ mutex_unlock(&syscore_lock);
}
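[Editor's note: taken together, these conversions replace register_syscore_ops(struct syscore_ops *) with register_syscore(struct syscore *): the ops table becomes const, and its suspend/resume/shutdown callbacks gain a void *data cookie carried by the registered struct syscore. A minimal registration under the new API, modeled on the call sites converted above — hypothetical my_* names; .data is optional and defaults to NULL, as in the conversions shown:]

        static int my_suspend(void *data)
        {
                /* Called late in suspend: interrupts disabled, one CPU online. */
                return 0;
        }

        static void my_resume(void *data)
        {
                /* Undo whatever my_suspend() did. */
        }

        static const struct syscore_ops my_syscore_ops = {
                .suspend = my_suspend,
                .resume = my_resume,
        };

        static struct syscore my_syscore = {
                .ops = &my_syscore_ops,
                /* .data = &my_state,  handed back to every callback */
        };

        /* somewhere in init code: */
        register_syscore(&my_syscore);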
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 00cb792bda18..dd94145c9b22 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1006,7 +1006,7 @@ static __init int mvebu_mbus_debugfs_init(void)
}
fs_initcall(mvebu_mbus_debugfs_init);
-static int mvebu_mbus_suspend(void)
+static int mvebu_mbus_suspend(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1040,7 +1040,7 @@ static int mvebu_mbus_suspend(void)
return 0;
}
-static void mvebu_mbus_resume(void)
+static void mvebu_mbus_resume(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1069,9 +1069,13 @@ static void mvebu_mbus_resume(void)
}
}
-static struct syscore_ops mvebu_mbus_syscore_ops = {
- .suspend = mvebu_mbus_suspend,
- .resume = mvebu_mbus_resume,
+static const struct syscore_ops mvebu_mbus_syscore_ops = {
+ .suspend = mvebu_mbus_suspend,
+ .resume = mvebu_mbus_resume,
+};
+
+static struct syscore mvebu_mbus_syscore = {
+ .ops = &mvebu_mbus_syscore_ops,
};
static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
@@ -1118,7 +1122,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
writel(UNIT_SYNC_BARRIER_ALL,
mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
- register_syscore_ops(&mvebu_mbus_syscore_ops);
+ register_syscore(&mvebu_mbus_syscore);
return 0;
}
diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c
index 4cf1b60014b7..debeaf8ea1bd 100644
--- a/drivers/bus/stm32_rifsc.c
+++ b/drivers/bus/stm32_rifsc.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -25,6 +26,8 @@
#define RIFSC_RISC_PRIVCFGR0 0x30
#define RIFSC_RISC_PER0_CIDCFGR 0x100
#define RIFSC_RISC_PER0_SEMCR 0x104
+#define RIFSC_RISC_REG0_ACFGR 0x900
+#define RIFSC_RISC_REG3_AADDR 0x924
#define RIFSC_RISC_HWCFGR2 0xFEC
/*
@@ -70,6 +73,565 @@
#define RIF_CID0 0x0
#define RIF_CID1 0x1
+#if defined(CONFIG_DEBUG_FS)
+#define RIFSC_RISUP_ENTRIES 128
+#define RIFSC_RIMU_ENTRIES 16
+#define RIFSC_RISAL_SUBREGIONS 2
+#define RIFSC_RISAL_GRANULARITY 8
+
+#define RIFSC_RIMC_ATTR0 0xC10
+
+#define RIFSC_RIMC_CIDSEL BIT(2)
+#define RIFSC_RIMC_MCID_MASK GENMASK(6, 4)
+#define RIFSC_RIMC_MSEC BIT(8)
+#define RIFSC_RIMC_MPRIV BIT(9)
+
+#define RIFSC_RISC_SRCID_MASK GENMASK(6, 4)
+#define RIFSC_RISC_SRPRIV BIT(9)
+#define RIFSC_RISC_SRSEC BIT(8)
+#define RIFSC_RISC_SRRLOCK BIT(1)
+#define RIFSC_RISC_SREN BIT(0)
+#define RIFSC_RISC_SRLENGTH_MASK GENMASK(27, 16)
+#define RIFSC_RISC_SRSTART_MASK GENMASK(10, 0)
+
+static const char *stm32mp21_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "OTG_HS",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "RESERVED",
+ "DCMIPP",
+ "LTDC_L1/L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+};
+
+static const char *stm32mp25_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "USB3DR",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "PCIE",
+ "GPU",
+ "DMCIPP",
+ "LTDC_L0/L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "VDEC",
+ "VENC"
+};
+
+static const char *stm32mp21_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "RESERVED",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "RESERVED",
+ "RESERVED",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "RESERVED",
+ "RESERVED",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "RESERVED",
+ "FDCAN",
+ "HDP",
+ "ADC1",
+ "ADC2",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "OTG_HS",
+ "DDRPERFM",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "STGEN",
+ "OCTOSPI1",
+ "RESERVED",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "RESERVED",
+ "LTDC_CMN",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RNG1",
+ "RNG2",
+ "PKA",
+ "SAES",
+ "HASH1",
+ "HASH2",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "WWDG1",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "RESERVED",
+ "ICACHE_DCACHE",
+ "LTDC_L1L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "OTFDEC1",
+ "RESERVED",
+ "IAC",
+};
+
+static const char *stm32mp25_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "TIM20",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "SPI7",
+ "SPI8",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "UART8",
+ "UART9",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "I2C4",
+ "I2C5",
+ "I2C6",
+ "I2C7",
+ "I2C8",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "ADF1",
+ "FDCAN",
+ "HDP",
+ "ADC12",
+ "ADC3",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "USB3DR",
+ "COMBOPHY",
+ "PCIE",
+ "UCPD1",
+ "ETHSW_DEIP",
+ "ETHSW_ACM_CF",
+ "ETHSW_ACM_MSGBU",
+ "STGEN",
+ "OCTOSPI1",
+ "OCTOSPI2",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "GPU",
+ "LTDC_CMN",
+ "DSI_CMN",
+ "RESERVED",
+ "RESERVED",
+ "LVDS",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "VDEC",
+ "VENC",
+ "RESERVED",
+ "RNG",
+ "PKA",
+ "SAES",
+ "HASH",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "IWDG5",
+ "WWDG1",
+ "WWDG2",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "OCTOSPIM",
+ "GICV2M",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "I3C4",
+ "ICACHE_DCACHE",
+ "LTDC_L0L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "DSI_TRIG",
+ "DSI_RDFIFO",
+ "RESERVED",
+ "OTFDEC1",
+ "OTFDEC2",
+ "IAC",
+};
+struct rifsc_risup_debug_data {
+ char dev_name[15];
+ u8 dev_cid;
+ u8 dev_sem_cids;
+ u8 dev_id;
+ bool dev_cid_filt_en;
+ bool dev_sem_en;
+ bool dev_priv;
+ bool dev_sec;
+};
+
+struct rifsc_rimu_debug_data {
+ char m_name[11];
+ u8 m_cid;
+ bool cidsel;
+ bool m_sec;
+ bool m_priv;
+};
+
+struct rifsc_subreg_debug_data {
+ bool sr_sec;
+ bool sr_priv;
+ u8 sr_cid;
+ bool sr_rlock;
+ bool sr_enable;
+ u16 sr_start;
+ u16 sr_length;
+};
+
+struct stm32_rifsc_resources_names {
+ const char **device_names;
+ const char **initiator_names;
+};
+struct rifsc_dbg_private {
+ const struct stm32_rifsc_resources_names *res_names;
+ void __iomem *mmio;
+ unsigned int nb_risup;
+ unsigned int nb_rimu;
+ unsigned int nb_risal;
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp21_res_names = {
+ .device_names = stm32mp21_rifsc_risup_names,
+ .initiator_names = stm32mp21_rifsc_rimu_names,
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp25_res_names = {
+ .device_names = stm32mp25_rifsc_risup_names,
+ .initiator_names = stm32mp25_rifsc_rimu_names,
+};
+
+static void stm32_rifsc_fill_rimu_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_rimu_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 rimc_attr = readl_relaxed(rifsc->mmio + RIFSC_RIMC_ATTR0 + 0x4 * i);
+
+ snprintf(dbg_entry->m_name, sizeof(dbg_entry->m_name), "%s", dbg_names->initiator_names[i]);
+ dbg_entry->m_cid = FIELD_GET(RIFSC_RIMC_MCID_MASK, rimc_attr);
+ dbg_entry->cidsel = rimc_attr & RIFSC_RIMC_CIDSEL;
+ dbg_entry->m_sec = rimc_attr & RIFSC_RIMC_MSEC;
+ dbg_entry->m_priv = rimc_attr & RIFSC_RIMC_MPRIV;
+}
+
+static void stm32_rifsc_fill_dev_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_risup_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 cid_cfgr, sec_cfgr, priv_cfgr;
+ u8 reg_id = i / IDS_PER_RISC_SEC_PRIV_REGS;
+ u8 reg_offset = i % IDS_PER_RISC_SEC_PRIV_REGS;
+
+ cid_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * i);
+ sec_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id);
+ priv_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PRIVCFGR0 + 0x4 * reg_id);
+
+ snprintf(dbg_entry->dev_name, sizeof(dbg_entry->dev_name), "%s",
+ dbg_names->device_names[i]);
+ dbg_entry->dev_id = i;
+ dbg_entry->dev_cid_filt_en = cid_cfgr & CIDCFGR_CFEN;
+ dbg_entry->dev_sem_en = cid_cfgr & CIDCFGR_SEMEN;
+ dbg_entry->dev_cid = FIELD_GET(RIFSC_RISC_SCID_MASK, cid_cfgr);
+ dbg_entry->dev_sem_cids = FIELD_GET(RIFSC_RISC_SEMWL_MASK, cid_cfgr);
+ dbg_entry->dev_sec = sec_cfgr & BIT(reg_offset) ? true : false;
+ dbg_entry->dev_priv = priv_cfgr & BIT(reg_offset) ? true : false;
+}
+
+
+static void stm32_rifsc_fill_subreg_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_subreg_debug_data *dbg_entry, int i,
+ int j)
+{
+ u32 risc_xcfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG0_ACFGR + 0x10 * i + 0x8 * j);
+ u32 risc_xaddr;
+
+ dbg_entry->sr_sec = risc_xcfgr & RIFSC_RISC_SRSEC;
+ dbg_entry->sr_priv = risc_xcfgr & RIFSC_RISC_SRPRIV;
+ dbg_entry->sr_cid = FIELD_GET(RIFSC_RISC_SRCID_MASK, risc_xcfgr);
+ dbg_entry->sr_rlock = risc_xcfgr & RIFSC_RISC_SRRLOCK;
+ dbg_entry->sr_enable = risc_xcfgr & RIFSC_RISC_SREN;
+ if (i == 2) {
+ risc_xaddr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG3_AADDR + 0x8 * j);
+ dbg_entry->sr_length = FIELD_GET(RIFSC_RISC_SRLENGTH_MASK, risc_xaddr);
+ dbg_entry->sr_start = FIELD_GET(RIFSC_RISC_SRSTART_MASK, risc_xaddr);
+ } else {
+ dbg_entry->sr_start = 0;
+ dbg_entry->sr_length = U16_MAX;
+ }
+}
+
+static int stm32_rifsc_conf_dump_show(struct seq_file *s, void *data)
+{
+ struct rifsc_dbg_private *rifsc = (struct rifsc_dbg_private *)s->private;
+ int i, j;
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIFSC dump\n");
+ seq_puts(s, "=============================================\n\n");
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISUP dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_printf(s, "\n| %-15s |", "Peripheral name");
+ seq_puts(s, "| Firewall ID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| CID filtering |");
+ seq_puts(s, "| Semaphore mode |");
+ seq_puts(s, "| SCID |");
+ seq_printf(s, "| %7s |\n", "SEMWL");
+
+ for (i = 0; i < RIFSC_RISUP_ENTRIES && i < rifsc->nb_risup; i++) {
+ struct rifsc_risup_debug_data d_dbg_entry;
+
+ stm32_rifsc_fill_dev_dbg_entry(rifsc, &d_dbg_entry, i);
+
+ seq_printf(s, "| %-15s |", d_dbg_entry.dev_name);
+ seq_printf(s, "| %-11d |", d_dbg_entry.dev_id);
+ seq_printf(s, "| %-8s |", d_dbg_entry.dev_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", d_dbg_entry.dev_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| %-13s |", str_enabled_disabled(d_dbg_entry.dev_cid_filt_en));
+ seq_printf(s, "| %-14s |", str_enabled_disabled(d_dbg_entry.dev_sem_en));
+ seq_printf(s, "| %-4d |", d_dbg_entry.dev_cid);
+ seq_printf(s, "| %#-7x |\n", d_dbg_entry.dev_sem_cids);
+ }
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIMU dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| RIMU's name |");
+ seq_puts(s, "| CIDSEL |");
+ seq_puts(s, "| MCID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |\n");
+
+	for (i = 0; i < RIFSC_RIMU_ENTRIES && i < rifsc->nb_rimu; i++) {
+ struct rifsc_rimu_debug_data m_dbg_entry;
+
+ stm32_rifsc_fill_rimu_dbg_entry(rifsc, &m_dbg_entry, i);
+
+ seq_printf(s, "| %-11s |", m_dbg_entry.m_name);
+ seq_printf(s, "| %-6s |", m_dbg_entry.cidsel ? "CIDSEL" : "");
+ seq_printf(s, "| %-4d |", m_dbg_entry.m_cid);
+ seq_printf(s, "| %-8s |", m_dbg_entry.m_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |\n", m_dbg_entry.m_priv ? "PRIV" : "NPRIV");
+ }
+
+ if (rifsc->nb_risal > 0) {
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISAL dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| Memory |");
+ seq_puts(s, "| Subreg. |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| Subreg. CID |");
+ seq_puts(s, "| Resource lock |");
+ seq_puts(s, "| Subreg. enable |");
+ seq_puts(s, "| Subreg. start |");
+ seq_puts(s, "| Subreg. end |\n");
+
+ for (i = 0; i < rifsc->nb_risal; i++) {
+ for (j = 0; j < RIFSC_RISAL_SUBREGIONS; j++) {
+ struct rifsc_subreg_debug_data sr_dbg_entry;
+
+ stm32_rifsc_fill_subreg_dbg_entry(rifsc, &sr_dbg_entry, i, j);
+
+ seq_printf(s, "| LPSRAM%1d |", i + 1);
+ seq_printf(s, "| %1s |", (j == 0) ? "A" : "B");
+ seq_printf(s, "| %-8s |", sr_dbg_entry.sr_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", sr_dbg_entry.sr_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| 0x%-9x |", sr_dbg_entry.sr_cid);
+ seq_printf(s, "| %-13s |",
+ sr_dbg_entry.sr_rlock ? "locked (1)" : "unlocked (0)");
+ seq_printf(s, "| %-14s |",
+ str_enabled_disabled(sr_dbg_entry.sr_enable));
+ seq_printf(s, "| 0x%-11x |", sr_dbg_entry.sr_start);
+ seq_printf(s, "| 0x%-11x |\n", sr_dbg_entry.sr_start +
+ sr_dbg_entry.sr_length - 1);
+ }
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stm32_rifsc_conf_dump);
+
+static int stm32_rifsc_register_debugfs(struct stm32_firewall_controller *rifsc_controller,
+ u32 nb_risup, u32 nb_rimu, u32 nb_risal)
+{
+ struct rifsc_dbg_private *rifsc_priv;
+ struct dentry *root = NULL;
+
+ rifsc_priv = devm_kzalloc(rifsc_controller->dev, sizeof(*rifsc_priv), GFP_KERNEL);
+ if (!rifsc_priv)
+ return -ENOMEM;
+
+ rifsc_priv->mmio = rifsc_controller->mmio;
+ rifsc_priv->nb_risup = nb_risup;
+ rifsc_priv->nb_rimu = nb_rimu;
+ rifsc_priv->nb_risal = nb_risal;
+ rifsc_priv->res_names = of_device_get_match_data(rifsc_controller->dev);
+
+ root = debugfs_lookup("stm32_firewall", NULL);
+ if (!root)
+ root = debugfs_create_dir("stm32_firewall", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ debugfs_create_file("rifsc", 0444, root, rifsc_priv, &stm32_rifsc_conf_dump_fops);
+
+ return 0;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
static bool stm32_rifsc_is_semaphore_available(void __iomem *addr)
{
return !(readl(addr) & SEMCR_MUTEX);
@@ -207,9 +769,19 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
rifsc_controller->release_access = stm32_rifsc_release_access;
	/* Get number of RIFSC entries */
- nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK;
- nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK;
- nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK;
+ nb_risup = FIELD_GET(HWCFGR2_CONF1_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_rimu = FIELD_GET(HWCFGR2_CONF2_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_risal = FIELD_GET(HWCFGR2_CONF3_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ /*
+ * On STM32MP21, RIFSC_RISC_HWCFGR2 shows an incorrect number of RISAL (NUM_RISAL is 3
+ * instead of 0). A software workaround is implemented using the st,mem-map property in the
+ * device tree. This property is absent or left empty if there is no RISAL.
+ */
+ if (of_device_is_compatible(np, "st,stm32mp21-rifsc"))
+ nb_risal = 0;
rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal;
platform_set_drvdata(pdev, rifsc_controller);
@@ -228,12 +800,29 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
return rc;
}
+#if defined(CONFIG_DEBUG_FS)
+ rc = stm32_rifsc_register_debugfs(rifsc_controller, nb_risup, nb_rimu, nb_risal);
+ if (rc)
+ return dev_err_probe(rifsc_controller->dev, rc, "Failed creating debugfs entry\n");
+#endif
+
/* Populate all allowed nodes */
return of_platform_populate(np, NULL, NULL, &pdev->dev);
}
static const struct of_device_id stm32_rifsc_of_match[] = {
- { .compatible = "st,stm32mp25-rifsc" },
+ {
+ .compatible = "st,stm32mp25-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp25_res_names,
+#endif
+ },
+ {
+ .compatible = "st,stm32mp21-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp21_res_names,
+#endif
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
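For reference, the HWCFGR2 change in the probe hunk above is more than a cleanup: assuming the HWCFGR2_CONF*_MASK values are shifted fields (their definitions are not in this hunk), the old "readl(...) & MASK" expression kept the field in place within the register word, so any field not starting at bit 0 yielded a wrong count. A minimal sketch of the difference, using a made-up mask:

    #include <linux/bitfield.h>

    /* Illustrative mask only; assume the counter lives in bits 15:8. */
    #define EXAMPLE_CONF_MASK	GENMASK(15, 8)

    static inline u32 example_extract(u32 reg)
    {
    	/*
    	 * "reg & EXAMPLE_CONF_MASK" would leave the value shifted left
    	 * by 8; FIELD_GET() both masks and shifts, yielding the count.
    	 */
    	return FIELD_GET(EXAMPLE_CONF_MASK, reg);
    }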
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 7a33c3b31d1e..82735c58be11 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -373,7 +373,6 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
unlock:
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
@@ -417,7 +416,6 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 5566ad11399e..610354ce7f8f 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -48,6 +48,7 @@ enum sysc_soc {
SOC_UNKNOWN,
SOC_2420,
SOC_2430,
+ SOC_AM33,
SOC_3430,
SOC_AM35,
SOC_3630,
@@ -2912,6 +2913,7 @@ static void ti_sysc_idle(struct work_struct *work)
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM33*", SOC_AM33),
SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
@@ -3117,10 +3119,15 @@ static int sysc_check_active_timer(struct sysc *ddata)
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
- if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ switch (sysc_soc->soc) {
+ case SOC_AM33:
+ case SOC_3430:
+ case SOC_AM35:
error = -ENXIO;
- else
+ break;
+ default:
error = -EBUSY;
+ }
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index db51386c663a..1518449d47b5 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -1,9 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Cache Drivers"
+
+menuconfig CACHEMAINT_FOR_DMA
+ bool "Cache management for noncoherent DMA"
+ depends on RISCV
+ default y
+ help
+ These drivers implement support for noncoherent DMA master devices
+ on platforms that lack the standard CPU interfaces for this.
+
+if CACHEMAINT_FOR_DMA
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
- depends on RISCV
select RISCV_NONSTANDARD_CACHE_OPS
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
@@ -16,7 +24,6 @@ config SIFIVE_CCACHE
config STARFIVE_STARLINK_CACHE
bool "StarFive StarLink Cache controller"
- depends on RISCV
depends on ARCH_STARFIVE
depends on 64BIT
select RISCV_DMA_NONCOHERENT
@@ -24,4 +31,26 @@ config STARFIVE_STARLINK_CACHE
help
Support for the StarLink cache controller IP from StarFive.
-endmenu
+endif # CACHEMAINT_FOR_DMA
+
+menuconfig CACHEMAINT_FOR_HOTPLUG
+ bool "Cache management for memory hot plug like operations"
+ depends on GENERIC_CPU_CACHE_MAINTENANCE
+ help
+ These drivers implement cache management for flows where it is necessary
+ to flush data from all host caches.
+
+if CACHEMAINT_FOR_HOTPLUG
+
+config HISI_SOC_HHA
+ tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ help
+ The Hydra Home Agent (HHA) is responsible for cache coherency
+	  on the SoC. This driver enables the cache maintenance functions of
+ the HHA.
+
+ This driver can be built as a module. If so, the module will be
+ called hisi_soc_hha.
+
+endif # CACHEMAINT_FOR_HOTPLUG
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
index 55c5e851034d..b3362b15d6c1 100644
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -3,3 +3,5 @@
obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o
+
+obj-$(CONFIG_HISI_SOC_HHA) += hisi_soc_hha.o
diff --git a/drivers/cache/hisi_soc_hha.c b/drivers/cache/hisi_soc_hha.c
new file mode 100644
index 000000000000..25ff0f5ae79b
--- /dev/null
+++ b/drivers/cache/hisi_soc_hha.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ * Yushan Wang <wangyushan12@huawei.com>
+ *
+ * A system typically contains multiple HHAs. Each is responsible for a subset
+ * of the physical addresses in the system, but interleaving can make the
+ * mapping from a particular cache line to a responsible HHA complex. As
+ * such, no filtering is done in the driver; the hardware is responsible
+ * for responding with success even if it was not responsible for any
+ * addresses in the range on which the operation was requested.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cache_coherency.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define HISI_HHA_CTRL 0x5004
+#define HISI_HHA_CTRL_EN BIT(0)
+#define HISI_HHA_CTRL_RANGE BIT(1)
+#define HISI_HHA_CTRL_TYPE GENMASK(3, 2)
+#define HISI_HHA_START_L 0x5008
+#define HISI_HHA_START_H 0x500c
+#define HISI_HHA_LEN_L 0x5010
+#define HISI_HHA_LEN_H 0x5014
+
+/* The maintenance operation is performed at a 128 byte granularity */
+#define HISI_HHA_MAINT_ALIGN 128
+
+#define HISI_HHA_POLL_GAP_US 10
+#define HISI_HHA_POLL_TIMEOUT_US 50000
+
+struct hisi_soc_hha {
+ /* Must be first element */
+ struct cache_coherency_ops_inst cci;
+ /* Locks HHA instance to forbid overlapping access. */
+ struct mutex lock;
+ void __iomem *base;
+};
+
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+ u32 val;
+
+ return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
+ !(val & HISI_HHA_CTRL_EN),
+ HISI_HHA_POLL_GAP_US,
+ HISI_HHA_POLL_TIMEOUT_US);
+}
+
+static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+ phys_addr_t top, addr = invp->addr;
+ size_t size = invp->size;
+ u32 reg;
+
+ if (!size)
+ return -EINVAL;
+
+ addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN);
+ top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN);
+ size = top - addr;
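+	/* e.g. addr = 0x1008, size = 0x10 becomes addr = 0x1000, size = 0x80 (one granule) */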
+
+ guard(mutex)(&soc_hha->lock);
+
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -EBUSY;
+
+ /*
+	 * Hardware will search for addresses in the range [addr, addr + size - 1],
+ * last byte included, and perform maintenance in 128 byte granules
+ * on those cachelines which contain the addresses. If a given instance
+ * is either not responsible for a cacheline or that cacheline is not
+ * currently present then the search will fail, no operation will be
+ * necessary and the device will report success.
+ */
+ size -= 1;
+
+ writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
+ writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
+ writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
+ writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);
+
+ reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
+ reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
+ writel(reg, soc_hha->base + HISI_HHA_CTRL);
+
+ return 0;
+}
+
+static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+
+ guard(mutex)(&soc_hha->lock);
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct cache_coherency_ops hha_ops = {
+ .wbinv = hisi_soc_hha_wbinv,
+ .done = hisi_soc_hha_done,
+};
+
+static int hisi_soc_hha_probe(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha;
+ struct resource *mem;
+ int ret;
+
+ soc_hha = cache_coherency_ops_instance_alloc(&hha_ops,
+ struct hisi_soc_hha, cci);
+ if (!soc_hha)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, soc_hha);
+
+ mutex_init(&soc_hha->lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err_free_cci;
+ }
+
+ soc_hha->base = ioremap(mem->start, resource_size(mem));
+ if (!soc_hha->base) {
+		ret = dev_err_probe(&pdev->dev, -ENOMEM,
+				    "failed to remap io memory\n");
+ goto err_free_cci;
+ }
+
+ ret = cache_coherency_ops_instance_register(&soc_hha->cci);
+ if (ret)
+ goto err_iounmap;
+
+ return 0;
+
+err_iounmap:
+ iounmap(soc_hha->base);
+err_free_cci:
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+ return ret;
+}
+
+static void hisi_soc_hha_remove(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);
+
+ cache_coherency_ops_instance_unregister(&soc_hha->cci);
+ iounmap(soc_hha->base);
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+}
+
+static const struct acpi_device_id hisi_soc_hha_ids[] = {
+ { "HISI0511", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);
+
+static struct platform_driver hisi_soc_hha_driver = {
+ .driver = {
+ .name = "hisi_soc_hha",
+ .acpi_match_table = hisi_soc_hha_ids,
+ },
+ .probe = hisi_soc_hha_probe,
+ .remove = hisi_soc_hha_remove,
+};
+
+module_platform_driver(hisi_soc_hha_driver);
+
+MODULE_IMPORT_NS("CACHE_COHERENCY");
+MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_AUTHOR("Yushan Wang <wangyushan12@huawei.com>");
+MODULE_LICENSE("GPL");
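To make the expected call flow concrete, here is a minimal sketch of how a caller would drive the two callbacks on one instance. It is illustrative only: the "ops" member name on struct cache_coherency_ops_inst is an assumption, since the CACHE_COHERENCY core itself is not part of this patch.

    /* Hypothetical helper, not part of the driver above. */
    static int hha_flush_range_sketch(struct cache_coherency_ops_inst *cci,
    				      phys_addr_t addr, size_t size)
    {
    	struct cc_inval_params invp = {
    		.addr = addr,
    		.size = size,
    	};
    	int ret;

    	ret = cci->ops->wbinv(cci, &invp);	/* kick the clean+invalidate */
    	if (ret)
    		return ret;

    	return cci->ops->done(cci);		/* poll until the HHA is idle */
    }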
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 3700ab4eba3e..3f48fc6ab596 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -599,7 +599,8 @@ static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
struct ipmi_device_id *id,
bool guid_set, guid_t *guid, int intf_num);
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id, bool rescan);
static void free_ipmi_user(struct kref *ref)
{
@@ -2668,7 +2669,7 @@ retry_bmc_lock:
if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
need_waiter(intf); /* Retry later on an error. */
else
- __scan_channels(intf, &id);
+ __scan_channels(intf, &id, false);
if (!intf_set) {
@@ -2688,7 +2689,7 @@ retry_bmc_lock:
goto out_noprocessing;
} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
/* Version info changes, scan the channels again. */
- __scan_channels(intf, &bmc->fetch_id);
+ __scan_channels(intf, &bmc->fetch_id, true);
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
@@ -3417,8 +3418,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
intf->channels_ready = true;
wake_up(&intf->waitq);
} else {
- intf->channel_list = intf->wchannels + set;
- intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
}
@@ -3440,10 +3439,21 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
/*
* Must be holding intf->bmc_reg_mutex to call this.
*/
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool rescan)
{
int rv;
+ if (rescan) {
+		/* Clear channels_ready to force a rescan of the channels. */
+ intf->channels_ready = false;
+ }
+
+ /* Skip channel scan if channels are already marked ready */
+ if (intf->channels_ready)
+ return 0;
+
if (ipmi_version_major(id) > 1
|| (ipmi_version_major(id) == 1
&& ipmi_version_minor(id) >= 5)) {
@@ -3658,7 +3668,7 @@ int ipmi_add_smi(struct module *owner,
}
mutex_lock(&intf->bmc_reg_mutex);
- rv = __scan_channels(intf, &id);
+ rv = __scan_channels(intf, &id, false);
mutex_unlock(&intf->bmc_reg_mutex);
if (rv)
goto out_err_bmc_reg;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index dd502322f499..3a77be7ebf4a 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -11,8 +11,11 @@
* used by the kernel internally.
*/
+#include "linux/dev_printk.h"
+#include "linux/tpm.h"
#include "tpm.h"
#include <crypto/hash_info.h>
+#include <linux/unaligned.h>
static bool disable_pcr_integrity;
module_param(disable_pcr_integrity, bool, 0444);
@@ -199,11 +202,15 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
}
if (!disable_pcr_integrity) {
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ rc = tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
} else {
tpm_buf_append_handle(chip, &buf, pcr_idx);
- tpm_buf_append_auth(chip, &buf, 0, NULL, 0);
+ tpm_buf_append_auth(chip, &buf, NULL, 0);
}
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -214,8 +221,14 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- if (!disable_pcr_integrity)
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity) {
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+ }
+
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
if (!disable_pcr_integrity)
rc = tpm_buf_check_hmac_response(chip, &buf, rc);
@@ -269,11 +282,24 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
do {
tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
- tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT
- | TPM2_SA_CONTINUE_SESSION,
- NULL, 0);
+ if (tpm2_chip_auth(chip)) {
+ tpm_buf_append_hmac_session(chip, &buf,
+ TPM2_SA_ENCRYPT |
+ TPM2_SA_CONTINUE_SESSION,
+ NULL, 0);
+ } else {
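+			/*
+			 * No HMAC session: if nothing follows the handles,
+			 * flip the header tag to TPM2_ST_NO_SESSIONS.
+			 */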
+ offset = buf.handles * 4 + TPM_HEADER_SIZE;
+ head = (struct tpm_header *)buf.data;
+ if (tpm_buf_length(&buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ }
tpm_buf_append_u16(&buf, num_bytes);
- tpm_buf_fill_hmac_session(chip, &buf);
+ err = tpm_buf_fill_hmac_session(chip, &buf);
+ if (err) {
+ tpm_buf_destroy(&buf);
+ return err;
+ }
+
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 6d03c224e6b2..4149379665c4 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -144,59 +144,80 @@ struct tpm2_auth {
/*
 * Name size based on the TPM hash algorithm; -EINVAL for an unsupported algorithm
*/
-static u8 name_size(const u8 *name)
+static int name_size(const u8 *name)
{
- static u8 size_map[] = {
- [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE,
- [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE,
- [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE,
- [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE,
- };
- u16 alg = get_unaligned_be16(name);
- return size_map[alg] + 2;
-}
-
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- off_t offset = TPM_HEADER_SIZE;
- u32 tot_len = be32_to_cpu(head->length);
- u32 val;
-
- /* we're starting after the header so adjust the length */
- tot_len -= TPM_HEADER_SIZE;
-
- /* skip public */
- val = tpm_buf_read_u16(buf, &offset);
- if (val > tot_len)
- return -EINVAL;
- offset += val;
- /* name */
- val = tpm_buf_read_u16(buf, &offset);
- if (val != name_size(&buf->data[offset]))
+ u16 hash_alg = get_unaligned_be16(name);
+
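+	/* A TPM name is a be16 hash algorithm id followed by the digest, hence the "+ 2" */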
+ switch (hash_alg) {
+ case TPM_ALG_SHA1:
+ return SHA1_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA256:
+ return SHA256_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA384:
+ return SHA384_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA512:
+ return SHA512_DIGEST_SIZE + 2;
+ default:
+ pr_warn("tpm: unsupported name algorithm: 0x%04x\n", hash_alg);
return -EINVAL;
- memcpy(name, &buf->data[offset], val);
- /* forget the rest */
- return 0;
+ }
}
-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name)
{
+ u32 mso = tpm2_handle_mso(handle);
+ off_t offset = TPM_HEADER_SIZE;
+ int rc, name_size_alg;
struct tpm_buf buf;
- int rc;
+
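+	/*
+	 * Handles without a public area (e.g. PCRs, sessions, permanent
+	 * handles) use the handle value itself as the name.
+	 */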
+ if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE &&
+ mso != TPM2_MSO_NVRAM) {
+ memcpy(name, &handle, sizeof(u32));
+ return sizeof(u32);
+ }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
if (rc)
return rc;
tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
- if (rc == TPM2_RC_SUCCESS)
- rc = tpm2_parse_read_public(name, &buf);
- tpm_buf_destroy(&buf);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic");
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return tpm_ret_to_err(rc);
+ }
- return rc;
+ /* Skip TPMT_PUBLIC: */
+ offset += tpm_buf_read_u16(&buf, &offset);
+
+ /*
+ * Ensure space for the length field of TPM2B_NAME and hashAlg field of
+ * TPMT_HA (the extra four bytes).
+ */
+ if (offset + 4 > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ rc = tpm_buf_read_u16(&buf, &offset);
+ name_size_alg = name_size(&buf.data[offset]);
+
+	if (name_size_alg < 0) {
+		tpm_buf_destroy(&buf);
+		return name_size_alg;
+	}
+
+ if (rc != name_size_alg) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ if (offset + rc > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+	memcpy(name, &buf.data[offset], rc);
+	tpm_buf_destroy(&buf);
+	return name_size_alg;
}
#endif /* CONFIG_TCG_TPM2_HMAC */
@@ -221,52 +242,76 @@ static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
* As with most tpm_buf operations, success is assumed because failure
* will be caused by an incorrect programming model and indicated by a
* kernel message.
+ *
+ * Ends the authorization session on failure.
*/
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name)
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
{
#ifdef CONFIG_TCG_TPM2_HMAC
enum tpm2_mso_type mso = tpm2_handle_mso(handle);
struct tpm2_auth *auth;
+ u16 name_size_alg;
int slot;
+ int ret;
#endif
if (!tpm2_chip_auth(chip)) {
tpm_buf_append_handle(chip, buf, handle);
- return;
+ return 0;
}
#ifdef CONFIG_TCG_TPM2_HMAC
slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
if (slot >= AUTH_MAX_NAMES) {
- dev_err(&chip->dev, "TPM: too many handles\n");
- return;
+ dev_err(&chip->dev, "too many handles\n");
+ ret = -EIO;
+ goto err;
}
auth = chip->auth;
- WARN(auth->session != tpm_buf_length(buf),
- "name added in wrong place\n");
+ if (auth->session != tpm_buf_length(buf)) {
+ dev_err(&chip->dev, "session state malformed");
+ ret = -EIO;
+ goto err;
+ }
tpm_buf_append_u32(buf, handle);
auth->session += 4;
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- if (!name)
- tpm2_read_public(chip, handle, auth->name[slot]);
+ if (!name) {
+ ret = tpm2_read_public(chip, handle, auth->name[slot]);
+ if (ret < 0)
+ goto err;
+
+ name_size_alg = ret;
+		} else {
+			ret = name_size(name);
+			if (ret < 0)
+				goto err;
+
+			name_size_alg = ret;
+		}
} else {
- if (name)
- dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+ if (name) {
+ dev_err(&chip->dev, "handle 0x%08x does not use a name\n",
+ handle);
+ ret = -EIO;
+ goto err;
+ }
}
auth->name_h[slot] = handle;
if (name)
- memcpy(auth->name[slot], name, name_size(name));
+ memcpy(auth->name[slot], name, name_size_alg);
+#endif
+ return 0;
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+err:
+ tpm2_end_auth_session(chip);
+ return tpm_ret_to_err(ret);
#endif
}
EXPORT_SYMBOL_GPL(tpm_buf_append_name);
void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
- u8 attributes, u8 *passphrase, int passphrase_len)
+ u8 *passphrase, int passphrase_len)
{
/* offset tells us where the sessions area begins */
int offset = buf->handles * 4 + TPM_HEADER_SIZE;
@@ -327,8 +372,7 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- tpm_buf_append_auth(chip, buf, attributes, passphrase,
- passphrase_len);
+ tpm_buf_append_auth(chip, buf, passphrase, passphrase_len);
return;
}
@@ -533,11 +577,9 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
* encryption key and encrypts the first parameter of the command
* buffer with it.
*
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
+ * Ends the authorization session on failure.
*/
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
{
u32 cc, handles, val;
struct tpm2_auth *auth = chip->auth;
@@ -549,9 +591,12 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u8 cphash[SHA256_DIGEST_SIZE];
struct sha256_ctx sctx;
struct hmac_sha256_ctx hctx;
+ int ret;
- if (!auth)
- return;
+ if (!auth) {
+ ret = -EIO;
+ goto err;
+ }
/* save the command code in BE format */
auth->ordinal = head->ordinal;
@@ -560,9 +605,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
i = tpm2_find_cc(chip, cc);
if (i < 0) {
- dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc);
- return;
+ dev_err(&chip->dev, "command 0x%08x not found\n", cc);
+ ret = -EIO;
+ goto err;
}
+
attrs = chip->cc_attrs_tbl[i];
handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
@@ -576,9 +623,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 handle = tpm_buf_read_u32(buf, &offset_s);
if (auth->name_h[i] != handle) {
- dev_err(&chip->dev, "TPM: handle %d wrong for name\n",
- i);
- return;
+ dev_err(&chip->dev, "invalid handle 0x%08x\n", handle);
+ ret = -EIO;
+ goto err;
}
}
/* point offset_s to the start of the sessions */
@@ -609,12 +656,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
offset_s += len;
}
if (offset_s != offset_p) {
- dev_err(&chip->dev, "TPM session length is incorrect\n");
- return;
+ dev_err(&chip->dev, "session length is incorrect\n");
+ ret = -EIO;
+ goto err;
}
if (!hmac) {
- dev_err(&chip->dev, "TPM could not find HMAC session\n");
- return;
+ dev_err(&chip->dev, "could not find HMAC session\n");
+ ret = -EIO;
+ goto err;
}
/* encrypt before HMAC */
@@ -646,8 +695,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- sha256_update(&sctx, auth->name[i],
- name_size(auth->name[i]));
+ ret = name_size(auth->name[i]);
+ if (ret < 0)
+ goto err;
+
+ sha256_update(&sctx, auth->name[i], ret);
} else {
__be32 h = cpu_to_be32(auth->name_h[i]);
@@ -668,6 +720,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
hmac_sha256_update(&hctx, &auth->attrs, 1);
hmac_sha256_final(&hctx, hmac);
+ return 0;
+
+err:
+ tpm2_end_auth_session(chip);
+ return ret;
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index e700f40fd87f..e7208c47268b 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -3,6 +3,7 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index acf780a81589..2310f6f73162 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -115,7 +115,7 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
/* Address in SECURAM that says if we suspend to backup mode. */
static void __iomem *at91_pmc_backup_suspend;
-static int at91_pmc_suspend(void)
+static int at91_pmc_suspend(void *data)
{
unsigned int backup;
@@ -129,7 +129,7 @@ static int at91_pmc_suspend(void)
return clk_save_context();
}
-static void at91_pmc_resume(void)
+static void at91_pmc_resume(void *data)
{
unsigned int backup;
@@ -143,11 +143,15 @@ static void at91_pmc_resume(void)
clk_restore_context();
}
-static struct syscore_ops pmc_syscore_ops = {
+static const struct syscore_ops pmc_syscore_ops = {
.suspend = at91_pmc_suspend,
.resume = at91_pmc_resume,
};
+static struct syscore pmc_syscore = {
+ .ops = &pmc_syscore_ops,
+};
+
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
@@ -185,7 +189,7 @@ static int __init pmc_register_ops(void)
return -ENOMEM;
}
- register_syscore_ops(&pmc_syscore_ops);
+ register_syscore(&pmc_syscore);
return 0;
}
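The same mechanical conversion repeats throughout the clk, clocksource and cpuidle hunks below: the suspend/resume callbacks gain a void *data argument, the ops table becomes const, and a struct syscore instance wraps it for register_syscore(). A minimal sketch of the idiom; the .data member is an assumption inferred from the new callback signature (none of the drivers converted here use it, so they leave it unset):

    static int foo_suspend(void *data)
    {
    	/* save hardware state; 'data' comes from the syscore instance */
    	return 0;
    }

    static void foo_resume(void *data)
    {
    	/* restore hardware state */
    }

    static const struct syscore_ops foo_syscore_ops = {
    	.suspend = foo_suspend,
    	.resume  = foo_resume,
    };

    static struct syscore foo_syscore = {
    	.ops = &foo_syscore_ops,
    	/* .data = &foo_ctx, assumed to be handed back to the callbacks */
    };

    	...
    	register_syscore(&foo_syscore);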
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 5daa32c4cf25..543d7aee8d24 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -117,9 +117,6 @@ struct at91_clk_pms {
unsigned int parent;
};
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
index 5a18bca464cd..94081ab1e688 100644
--- a/drivers/clk/davinci/psc-da850.c
+++ b/drivers/clk/davinci/psc-da850.c
@@ -6,7 +6,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/reset-controller.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -66,14 +65,8 @@ LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
"fck", "ecap.1",
"fck", "ecap.2");
-static struct reset_control_lookup da850_psc0_reset_lookup_table[] = {
- RESET_LOOKUP("da850-psc0", 15, "davinci-rproc.0", NULL),
-};
-
static int da850_psc0_init(struct device *dev, void __iomem *base)
{
- reset_controller_add_lookup(da850_psc0_reset_lookup_table,
- ARRAY_SIZE(da850_psc0_reset_lookup_table));
return davinci_psc_register_clocks(dev, da850_psc0_info, 16, base);
}
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 9e11f1c7c397..41eb38552a9c 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -139,7 +139,7 @@ static struct clk * __init vf610_get_fixed_clock(
return clk;
};
-static int vf610_clk_suspend(void)
+static int vf610_clk_suspend(void *data)
{
int i;
@@ -156,7 +156,7 @@ static int vf610_clk_suspend(void)
return 0;
}
-static void vf610_clk_resume(void)
+static void vf610_clk_resume(void *data)
{
int i;
@@ -171,11 +171,15 @@ static void vf610_clk_resume(void)
writel_relaxed(ccgr[i], CCM_CCGRx(i));
}
-static struct syscore_ops vf610_clk_syscore_ops = {
+static const struct syscore_ops vf610_clk_syscore_ops = {
.suspend = vf610_clk_suspend,
.resume = vf610_clk_resume,
};
+static struct syscore vf610_clk_syscore = {
+ .ops = &vf610_clk_syscore_ops,
+};
+
static void __init vf610_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -462,7 +466,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clk[clks_init_on[i]]);
- register_syscore_ops(&vf610_clk_syscore_ops);
+ register_syscore(&vf610_clk_syscore);
/* Add the clocks to provider list */
clk_data.clks = clk;
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 590e9c85cb25..94cee44c854f 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -268,6 +268,6 @@ static void __init jz4725b_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init);
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 3e0a30574ebb..2def3aedc8dd 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -266,6 +266,6 @@ static void __init jz4740_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init);
diff --git a/drivers/clk/ingenic/jz4755-cgu.c b/drivers/clk/ingenic/jz4755-cgu.c
index f2c2d848dab7..17cf5dcaece9 100644
--- a/drivers/clk/ingenic/jz4755-cgu.c
+++ b/drivers/clk/ingenic/jz4755-cgu.c
@@ -337,7 +337,7 @@ static void __init jz4755_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index e407f00bd594..372fe4b07992 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -436,7 +436,7 @@ static void __init jz4760_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 6ae1740367f9..58f1d3bad677 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -456,7 +456,7 @@ static void __init jz4770_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index 07e2f3c5c454..1e88aef7ac0f 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -803,6 +803,6 @@ static void __init jz4780_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init);
diff --git a/drivers/clk/ingenic/pm.c b/drivers/clk/ingenic/pm.c
index 341752b640d2..206d5cf2872f 100644
--- a/drivers/clk/ingenic/pm.c
+++ b/drivers/clk/ingenic/pm.c
@@ -15,7 +15,7 @@
static void __iomem * __maybe_unused ingenic_cgu_base;
-static int __maybe_unused ingenic_cgu_pm_suspend(void)
+static int __maybe_unused ingenic_cgu_pm_suspend(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
@@ -24,22 +24,26 @@ static int __maybe_unused ingenic_cgu_pm_suspend(void)
return 0;
}
-static void __maybe_unused ingenic_cgu_pm_resume(void)
+static void __maybe_unused ingenic_cgu_pm_resume(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
writel(val & ~LCR_LOW_POWER_MODE, ingenic_cgu_base + CGU_REG_LCR);
}
-static struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
+static const struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
.suspend = ingenic_cgu_pm_suspend,
.resume = ingenic_cgu_pm_resume,
};
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu)
+static struct syscore __maybe_unused ingenic_cgu_pm = {
+ .ops = &ingenic_cgu_pm_ops,
+};
+
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu)
{
if (IS_ENABLED(CONFIG_PM_SLEEP)) {
ingenic_cgu_base = cgu->base;
- register_syscore_ops(&ingenic_cgu_pm_ops);
+ register_syscore(&ingenic_cgu_pm);
}
}
diff --git a/drivers/clk/ingenic/pm.h b/drivers/clk/ingenic/pm.h
index fa7540407b6b..0dcb57dc64cb 100644
--- a/drivers/clk/ingenic/pm.h
+++ b/drivers/clk/ingenic/pm.h
@@ -7,6 +7,6 @@
struct ingenic_cgu;
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu);
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu);
#endif /* DRIVERS_CLK_INGENIC_PM_H */
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 7d04ef40b7cf..bc6a51da2072 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -455,7 +455,7 @@ err_free_tcu:
return ret;
}
-static int __maybe_unused tcu_pm_suspend(void)
+static int __maybe_unused tcu_pm_suspend(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -465,7 +465,7 @@ static int __maybe_unused tcu_pm_suspend(void)
return 0;
}
-static void __maybe_unused tcu_pm_resume(void)
+static void __maybe_unused tcu_pm_resume(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -473,11 +473,15 @@ static void __maybe_unused tcu_pm_resume(void)
clk_enable(tcu->clk);
}
-static struct syscore_ops __maybe_unused tcu_pm_ops = {
+static const struct syscore_ops __maybe_unused tcu_pm_ops = {
.suspend = tcu_pm_suspend,
.resume = tcu_pm_resume,
};
+static struct syscore __maybe_unused tcu_pm = {
+ .ops = &tcu_pm_ops,
+};
+
static void __init ingenic_tcu_init(struct device_node *np)
{
int ret = ingenic_tcu_probe(np);
@@ -486,7 +490,7 @@ static void __init ingenic_tcu_init(struct device_node *np)
pr_crit("Failed to initialize TCU clocks: %d\n", ret);
if (IS_ENABLED(CONFIG_PM_SLEEP))
- register_syscore_ops(&tcu_pm_ops);
+ register_syscore(&tcu_pm);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init);
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index d80886caf393..d89bdfb7c219 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -556,7 +556,7 @@ static void __init x1000_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
index 0fd46e50a513..acf856e5009e 100644
--- a/drivers/clk/ingenic/x1830-cgu.c
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -463,7 +463,7 @@ static void __init x1830_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 785dbede4835..5adbbd91a6db 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -215,22 +215,26 @@ static struct clk *clk_gating_get_src(
return ERR_PTR(-ENODEV);
}
-static int mvebu_clk_gating_suspend(void)
+static int mvebu_clk_gating_suspend(void *data)
{
ctrl->saved_reg = readl(ctrl->base);
return 0;
}
-static void mvebu_clk_gating_resume(void)
+static void mvebu_clk_gating_resume(void *data)
{
writel(ctrl->saved_reg, ctrl->base);
}
-static struct syscore_ops clk_gate_syscore_ops = {
+static const struct syscore_ops clk_gate_syscore_ops = {
.suspend = mvebu_clk_gating_suspend,
.resume = mvebu_clk_gating_resume,
};
+static struct syscore clk_gate_syscore = {
+ .ops = &clk_gate_syscore_ops,
+};
+
void __init mvebu_clk_gating_setup(struct device_node *np,
const struct clk_gating_soc_desc *desc)
{
@@ -284,7 +288,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
of_clk_add_provider(np, clk_gating_get_src, ctrl);
- register_syscore_ops(&clk_gate_syscore_ops);
+ register_syscore(&clk_gate_syscore);
return;
gates_out:
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 03c59bf22106..b47c86906046 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -110,6 +110,25 @@ static const char * const gate_base_names[] = {
[CP110_GATE_EIP197] = "eip197"
};
+static unsigned long gate_flags(const u8 bit_idx)
+{
+ switch (bit_idx) {
+ case CP110_GATE_PCIE_X1_0:
+ case CP110_GATE_PCIE_X1_1:
+ case CP110_GATE_PCIE_X4:
+ /*
+ * If a port had an active link at boot time, stopping
+	 * the clock creates a failed state from which the controller
+	 * driver cannot recover.
+	 * Prevent stopping this clock until a driver has taken
+	 * ownership.
+ */
+ return CLK_IGNORE_UNUSED;
+ default:
+ return 0;
+ }
+}
+
struct cp110_gate_clk {
struct clk_hw hw;
struct regmap *regmap;
@@ -171,6 +190,7 @@ static struct clk_hw *cp110_register_gate(const char *name,
init.ops = &cp110_gate_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = gate_flags(bit_idx);
gate->regmap = regmap;
gate->bit_idx = bit_idx;
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 3abd6e5400ad..f7b827b5e9b2 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -7,6 +7,7 @@
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -171,8 +172,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
if (clock->src_mask == 0)
return 0;
- hw_index = (readl(clock->reg) & clock->src_mask) >>
- __ffs(clock->src_mask);
+ hw_index = field_get(clock->src_mask, readl(clock->reg));
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
return i;
@@ -191,7 +191,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
- src = clock->parents[index] << __ffs(clock->src_mask);
+ src = field_prep(clock->src_mask, clock->parents[index]);
writel((readl(clock->reg) & ~clock->src_mask) | src, clock->reg);
return 0;
}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 10ae20489df9..b954278ddd9d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -54,10 +54,8 @@ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
unsigned int mult;
- u32 val;
- val = readl(pll_clk->pllcr_reg) & CPG_PLLnCR_STC_MASK;
- mult = (val >> __ffs(CPG_PLLnCR_STC_MASK)) + 1;
+ mult = FIELD_GET(CPG_PLLnCR_STC_MASK, readl(pll_clk->pllcr_reg)) + 1;
return parent_rate * mult * pll_clk->fixed_mult;
}
@@ -94,7 +92,7 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val = readl(pll_clk->pllcr_reg);
val &= ~CPG_PLLnCR_STC_MASK;
- val |= (mult - 1) << __ffs(CPG_PLLnCR_STC_MASK);
+ val |= FIELD_PREP(CPG_PLLnCR_STC_MASK, mult - 1);
writel(val, pll_clk->pllcr_reg);
for (i = 1000; i; i--) {
@@ -176,11 +174,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -231,7 +225,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index fb9a876aaba5..db3a0b8ef2b9 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -279,11 +279,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -334,7 +330,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 0a1e017df7c6..9cf3e1e43b78 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -871,7 +871,7 @@ static const int rk3288_saved_cru_reg_ids[] = {
static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
-static int rk3288_clk_suspend(void)
+static int rk3288_clk_suspend(void *data)
{
int i, reg_id;
@@ -906,7 +906,7 @@ static int rk3288_clk_suspend(void)
return 0;
}
-static void rk3288_clk_resume(void)
+static void rk3288_clk_resume(void *data)
{
int i, reg_id;
@@ -923,11 +923,15 @@ static void rk3288_clk_shutdown(void)
writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
}
-static struct syscore_ops rk3288_clk_syscore_ops = {
+static const struct syscore_ops rk3288_clk_syscore_ops = {
.suspend = rk3288_clk_suspend,
.resume = rk3288_clk_resume,
};
+static struct syscore rk3288_clk_syscore = {
+ .ops = &rk3288_clk_syscore_ops,
+};
+
static void __init rk3288_common_init(struct device_node *np,
enum rk3288_variant soc)
{
@@ -976,7 +980,7 @@ static void __init rk3288_common_init(struct device_node *np,
rockchip_register_restart_notifier(ctx, RK3288_GLB_SRST_FST,
rk3288_clk_shutdown);
- register_syscore_ops(&rk3288_clk_syscore_ops);
+ register_syscore(&rk3288_clk_syscore);
rockchip_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index b1fd8fac3a4c..c9fcb23de183 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -36,7 +36,7 @@ static unsigned long reg_save[][2] = {
{ASS_CLK_GATE, 0},
};
-static int s5pv210_audss_clk_suspend(void)
+static int s5pv210_audss_clk_suspend(void *data)
{
int i;
@@ -46,7 +46,7 @@ static int s5pv210_audss_clk_suspend(void)
return 0;
}
-static void s5pv210_audss_clk_resume(void)
+static void s5pv210_audss_clk_resume(void *data)
{
int i;
@@ -54,10 +54,14 @@ static void s5pv210_audss_clk_resume(void)
writel(reg_save[i][1], reg_base + reg_save[i][0]);
}
-static struct syscore_ops s5pv210_audss_clk_syscore_ops = {
+static const struct syscore_ops s5pv210_audss_clk_syscore_ops = {
.suspend = s5pv210_audss_clk_suspend,
.resume = s5pv210_audss_clk_resume,
};
+
+static struct syscore s5pv210_audss_clk_syscore = {
+ .ops = &s5pv210_audss_clk_syscore_ops,
+};
#endif /* CONFIG_PM_SLEEP */
/* register s5pv210_audss clocks */
@@ -175,7 +179,7 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
- register_syscore_ops(&s5pv210_audss_clk_syscore_ops);
+ register_syscore(&s5pv210_audss_clk_syscore);
#endif
return 0;
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index dbc9925ca8f4..c149ca6c2217 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -271,7 +271,7 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
}
#ifdef CONFIG_PM_SLEEP
-static int samsung_clk_suspend(void)
+static int samsung_clk_suspend(void *data)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -284,7 +284,7 @@ static int samsung_clk_suspend(void)
return 0;
}
-static void samsung_clk_resume(void)
+static void samsung_clk_resume(void *data)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -293,11 +293,15 @@ static void samsung_clk_resume(void)
reg_cache->rd_num);
}
-static struct syscore_ops samsung_clk_syscore_ops = {
+static const struct syscore_ops samsung_clk_syscore_ops = {
.suspend = samsung_clk_suspend,
.resume = samsung_clk_resume,
};
+static struct syscore samsung_clk_syscore = {
+ .ops = &samsung_clk_syscore_ops,
+};
+
void samsung_clk_extended_sleep_init(void __iomem *reg_base,
const unsigned long *rdump,
unsigned long nr_rdump,
@@ -316,7 +320,7 @@ void samsung_clk_extended_sleep_init(void __iomem *reg_base,
panic("could not allocate register dump storage.\n");
if (list_empty(&clock_reg_cache_list))
- register_syscore_ops(&samsung_clk_syscore_ops);
+ register_syscore(&samsung_clk_syscore);
reg_cache->reg_base = reg_base;
reg_cache->rd_num = nr_rdump;
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 412902f573b5..504d0ea997a5 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3444,7 +3444,7 @@ static void tegra210_disable_cpu_clock(u32 cpu)
static u32 spare_reg_ctx, misc_clk_enb_ctx, clk_msk_arm_ctx;
static u32 cpu_softrst_ctx[3];
-static int tegra210_clk_suspend(void)
+static int tegra210_clk_suspend(void *data)
{
unsigned int i;
@@ -3465,7 +3465,7 @@ static int tegra210_clk_suspend(void)
return 0;
}
-static void tegra210_clk_resume(void)
+static void tegra210_clk_resume(void *data)
{
unsigned int i;
@@ -3523,13 +3523,17 @@ static void tegra210_cpu_clock_resume(void)
}
#endif
-static struct syscore_ops tegra_clk_syscore_ops = {
+static const struct syscore_ops tegra_clk_syscore_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = tegra210_clk_suspend,
.resume = tegra210_clk_resume,
#endif
};
+static struct syscore tegra_clk_syscore = {
+ .ops = &tegra_clk_syscore_ops,
+};
+
static struct tegra_cpu_car_ops tegra210_cpu_car_ops = {
.wait_for_reset = tegra210_wait_cpu_in_reset,
.disable_clock = tegra210_disable_cpu_clock,
@@ -3813,6 +3817,6 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_cpu_car_ops = &tegra210_cpu_car_ops;
- register_syscore_ops(&tegra_clk_syscore_ops);
+ register_syscore(&tegra_clk_syscore);
}
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index 54284c1c0651..f2b4cc40db93 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -207,14 +207,14 @@ static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
-static int armada_370_xp_timer_suspend(void)
+static int armada_370_xp_timer_suspend(void *data)
{
timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
return 0;
}
-static void armada_370_xp_timer_resume(void)
+static void armada_370_xp_timer_resume(void *data)
{
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
@@ -222,11 +222,15 @@ static void armada_370_xp_timer_resume(void)
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
-static struct syscore_ops armada_370_xp_timer_syscore_ops = {
+static const struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
+static struct syscore armada_370_xp_timer_syscore = {
+ .ops = &armada_370_xp_timer_syscore_ops,
+};
+
static unsigned long armada_370_delay_timer_read(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -324,7 +328,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
return res;
}
- register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+ register_syscore(&armada_370_xp_timer_syscore);
return 0;
}
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 53923b8ef7a1..31e07f0279db 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -207,9 +207,9 @@ impl platform::Driver for CPUFreqDTDriver {
fn probe(
pdev: &platform::Device<Core>,
_id_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
- Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ Ok(Self {})
}
}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index e75d85a8f90d..dcf20ea5ef5e 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -177,26 +177,30 @@ static void psci_idle_syscore_switch(bool suspend)
}
}
-static int psci_idle_syscore_suspend(void)
+static int psci_idle_syscore_suspend(void *data)
{
psci_idle_syscore_switch(true);
return 0;
}
-static void psci_idle_syscore_resume(void)
+static void psci_idle_syscore_resume(void *data)
{
psci_idle_syscore_switch(false);
}
-static struct syscore_ops psci_idle_syscore_ops = {
+static const struct syscore_ops psci_idle_syscore_ops = {
.suspend = psci_idle_syscore_suspend,
.resume = psci_idle_syscore_resume,
};
+static struct syscore psci_idle_syscore = {
+ .ops = &psci_idle_syscore_ops,
+};
+
static void psci_idle_init_syscore(void)
{
if (psci_cpuidle_use_syscore)
- register_syscore_ops(&psci_idle_syscore_ops);
+ register_syscore(&psci_idle_syscore);
}
static void psci_idle_init_cpuhp(void)
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index f394e45e11ab..f16a0f611317 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
+ select PCI_TSM if PCI
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index a9626b30044a..0424e08561ef 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -16,6 +16,10 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
hsti.o \
sfs.o
+ifeq ($(CONFIG_PCI_TSM),y)
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o
+endif
+
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes.o \
diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c
new file mode 100644
index 000000000000..9a98f98c20a7
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to PSP for CCP/SEV-TIO/SNP-VM
+
+#include <linux/pci.h>
+#include <linux/tsm.h>
+#include <linux/psp.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+#include <linux/pci-doe.h>
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+#include <asm/page.h>
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+#define to_tio_status(dev_data) \
+ (container_of((dev_data), struct tio_dsm, data)->sev->tio_status)
+
+#define SLA_PAGE_TYPE_DATA 0
+#define SLA_PAGE_TYPE_SCATTER 1
+#define SLA_PAGE_SIZE_4K 0
+#define SLA_PAGE_SIZE_2M 1
+#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
+#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t))
+#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
+#define SLA_NULL ((struct sla_addr_t) { 0 })
+#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla)
+#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla)
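+/* A scatter list is terminated by an entry with all 40 pfn bits set (SLA_EOL) */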
+
+static phys_addr_t sla_to_pa(struct sla_addr_t sla)
+{
+ u64 pfn = sla.pfn;
+ u64 pa = pfn << PAGE_SHIFT;
+
+ return pa;
+}
+
+static void *sla_to_va(struct sla_addr_t sla)
+{
+ void *va = __va(__sme_clr(sla_to_pa(sla)));
+
+ return va;
+}
+
+#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT)
+#define sla_to_page(sla) virt_to_page(sla_to_va(sla))
+
+static struct sla_addr_t make_sla(struct page *pg, bool stp)
+{
+ u64 pa = __sme_set(page_to_phys(pg));
+ struct sla_addr_t ret = {
+ .pfn = pa >> PAGE_SHIFT,
+		.page_size = SLA_PAGE_SIZE_4K, /* Do not use SLA_PAGE_SIZE_2M for now */
+ .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA
+ };
+
+ return ret;
+}
+
+/* The BUFFER structure */
+#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0)
+
+/*
+ * struct sla_buffer_hdr - Scatter list address buffer header
+ *
+ * @capacity_sz: Total capacity of the buffer in bytes
+ * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B
+ * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
+ * @iv: Initialization vector used for encryption
+ * @authtag: Authentication tag for encrypted buffer
+ */
+struct sla_buffer_hdr {
+ u32 capacity_sz;
+ u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. Must be multiple of 32B */
+ u32 flags;
+ u8 reserved1[4];
+ u8 iv[16]; /* IV used for the encryption of this buffer */
+ u8 authtag[16]; /* Authentication tag for this buffer */
+ u8 reserved2[16];
+} __packed;
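+
+/*
+ * Resulting buffer layout (a sketch; the offsets follow from the fields
+ * above and the BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40) in
+ * sla_buffer_map()):
+ *
+ *	0x00: struct sla_buffer_hdr
+ *	0x40: payload (a data object, starting with struct spdm_dobj_hdr)
+ */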
+
+enum spdm_data_type_t {
+ DOBJ_DATA_TYPE_SPDM = 0x1,
+ DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
+};
+
+struct spdm_dobj_hdr_req {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+struct spdm_dobj_hdr_resp {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
+struct spdm_dobj_hdr_cert;
+struct spdm_dobj_hdr_meas;
+struct spdm_dobj_hdr_report;
+
+/* Used in all SPDM-aware TIO commands */
+struct spdm_ctrl {
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+} __packed;
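+
+/*
+ * A rough sketch of how the four buffers are used by sev_tio_do_cmd()
+ * below: the PSP places an SPDM request in @req, the host forwards it
+ * over DOE and writes the device's reply into @resp, @scratch is PSP
+ * work memory, and @output receives the command's output data; the
+ * latter two may grow via sla_expand().
+ */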
+
+static size_t sla_dobj_id_to_size(u8 id)
+{
+ size_t n;
+
+ BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
+ switch (id) {
+ case SPDM_DOBJ_ID_REQ:
+ n = sizeof(struct spdm_dobj_hdr_req);
+ break;
+ case SPDM_DOBJ_ID_RESP:
+ n = sizeof(struct spdm_dobj_hdr_resp);
+ break;
+ default:
+ WARN_ON(1);
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id)
+#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
+#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))
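+
+/*
+ * Worked example for the accessors above: a response object with
+ * hdr->length == 0x50 has SPDM_DOBJ_HDR_SIZE() == 0x10 (enforced by the
+ * BUILD_BUG_ON() in sla_dobj_id_to_size()), so SPDM_DOBJ_DATA() points
+ * 0x10 bytes past the header and SPDM_DOBJ_LEN() is 0x40.
+ */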
+
+#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
+#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return NULL;
+
+ return (struct spdm_dobj_hdr *) &buf[1];
+}
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(!hdr))
+ return NULL;
+
+ if (hdr->id != check_dobjid) {
+		pr_err("Unexpected data object: expected %u, found %u\n", check_dobjid, hdr->id);
+ return NULL;
+ }
+
+ return hdr;
+}
+
+static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
+ return NULL;
+
+ if (!hdr)
+ return NULL;
+
+ return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
+}
+
+/*
+ * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
+ *
+ * @length: Length of this command buffer in bytes
+ * @status_paddr: System physical address of the TIO_STATUS structure
+ */
+struct sev_data_tio_status {
+ u32 length;
+ u8 reserved[4];
+ u64 status_paddr;
+} __packed;
+
+/* TIO_INIT */
+struct sev_data_tio_init {
+ u32 length;
+ u8 reserved[12];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
+ * @device_id: PCIe Routing Identifier of the device to connect to
+ * @root_port_id: PCIe Routing Identifier of the root port of the device
+ * @segment_id: PCIe Segment Identifier of the device to connect to
+ */
+struct sev_data_tio_dev_create {
+ u32 length;
+ u8 reserved1[4];
+ struct sla_addr_t dev_ctx_sla;
+ u16 device_id;
+ u16 root_port_id;
+ u8 segment_id;
+ u8 reserved2[11];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
+ * Setting the kth bit of the TC_MASK to 1 indicates that the traffic
+ * class k will be initialized
+ * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
+ * @ide_stream_id: IDE stream IDs to be associated with this device.
+ * Valid only if corresponding bit in TC_MASK is set
+ */
+struct sev_data_tio_dev_connect {
+ u32 length;
+ u8 reserved1[4];
+ struct spdm_ctrl spdm_ctrl;
+ u8 reserved2[8];
+ struct sla_addr_t dev_ctx_sla;
+ u8 tc_mask;
+ u8 cert_slot;
+ u8 reserved3[6];
+ u8 ide_stream_id[8];
+ u8 reserved4[8];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0)
+
+struct sev_data_tio_dev_disconnect {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @meas_nonce: Nonce for measurement freshness verification
+ */
+#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0)
+
+struct sev_data_tio_dev_meas {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+ u8 meas_nonce[32];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+struct sev_data_tio_dev_certs {
+ u32 length;
+ u8 reserved[4];
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ *
+ * This command reclaims resources associated with a device context.
+ */
+struct sev_data_tio_dev_reclaim {
+ u32 length;
+ u8 reserved[4];
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
+{
+ struct sla_buffer_hdr *buf;
+
+ BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
+ if (IS_SLA_NULL(sla))
+ return NULL;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
+ return NULL;
+
+ if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
+ return NULL;
+
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (WARN_ON_ONCE(!npages))
+ return NULL;
+
+ struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);
+
+ if (!pp)
+ return NULL;
+
+ for (i = 0; i < npages; ++i)
+ pp[i] = sla_to_page(scatter[i]);
+
+ buf = vm_map_ram(pp, npages, 0);
+ kfree(pp);
+ } else {
+ struct page *pg = sla_to_page(sla);
+
+ buf = vm_map_ram(&pg, 1, 0);
+ }
+
+ return buf;
+}
+
+static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (!npages)
+ return;
+
+ vm_unmap_ram(buf, npages);
+ } else {
+ vm_unmap_ram(buf, 1);
+ }
+}
+
+static void dobj_response_init(struct sla_buffer_hdr *buf)
+{
+ struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);
+
+ dobj->id = SPDM_DOBJ_ID_RESP;
+ dobj->version.major = 0x1;
+ dobj->version.minor = 0;
+ dobj->length = 0;
+ buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
+}
+
+static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
+{
+ unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ int ret = 0, i;
+
+ if (IS_SLA_NULL(sla))
+ return;
+
+ if (firmware_state) {
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ scatter = sla_to_va(sla);
+
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+
+ ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
+ if (ret)
+ break;
+ }
+ } else {
+ ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
+ }
+ }
+
+ if (WARN_ON(ret))
+ return;
+
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+ free_page((unsigned long)sla_to_va(scatter[i]));
+ }
+ }
+
+ free_page((unsigned long)sla_to_va(sla));
+}
+
+static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
+{
+ unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ struct sla_addr_t ret = SLA_NULL;
+ struct sla_buffer_hdr *buf;
+ struct page *pg;
+
+ if (npages == 0)
+ return ret;
+
+	/* One scatter page must also hold the SLA_EOL terminator */
+	if (WARN_ON_ONCE(npages >= PAGE_SIZE / sizeof(struct sla_addr_t)))
+ return ret;
+
+ BUILD_BUG_ON(PAGE_SIZE < SZ_4K);
+
+ if (npages > 1) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, true);
+ scatter = page_to_virt(pg);
+ for (i = 0; i < npages; ++i) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ goto no_reclaim_exit;
+
+ scatter[i] = make_sla(pg, false);
+ }
+ scatter[i] = SLA_EOL;
+ } else {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, false);
+ }
+
+ buf = sla_buffer_map(ret);
+ if (!buf)
+ goto no_reclaim_exit;
+
+ buf->capacity_sz = (npages << PAGE_SHIFT);
+ sla_buffer_unmap(ret, buf);
+
+ if (firmware_state) {
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
+ PG_LEVEL_4K, 0, true))
+ goto free_exit;
+ }
+ } else {
+ if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
+ goto no_reclaim_exit;
+ }
+ }
+
+ return ret;
+
+no_reclaim_exit:
+ firmware_state = false;
+free_exit:
+ sla_free(ret, len, firmware_state);
+ return SLA_NULL;
+}
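+
+/*
+ * Example of what sla_alloc(3 * SZ_4K, ...) builds per the code above:
+ * a scatter page whose first three entries point at 4K data pages and
+ * whose fourth entry is SLA_EOL; the returned SLA is marked
+ * SLA_PAGE_TYPE_SCATTER and the buffer header in the first data page
+ * gets capacity_sz = 0x3000.
+ */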
+
+/* Expand a buffer; only firmware-owned buffers are supported for now */
+static int sla_expand(struct sla_addr_t *sla, size_t *len)
+{
+ struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
+ struct sla_addr_t oldsla = *sla, newsla;
+ size_t oldlen = *len, newlen;
+
+ if (!oldbuf)
+ return -EFAULT;
+
+ newlen = oldbuf->capacity_sz;
+ if (oldbuf->capacity_sz == oldlen) {
+		/* This buffer does not require expansion; the caller will try the other one */
+ sla_buffer_unmap(oldsla, oldbuf);
+ return 1;
+ }
+
+	pr_notice("Expanding BUFFER from %zu to %zu bytes\n", oldlen, newlen);
+
+ newsla = sla_alloc(newlen, true);
+ if (IS_SLA_NULL(newsla))
+ return -ENOMEM;
+
+ newbuf = sla_buffer_map(newsla);
+ if (!newbuf) {
+ sla_free(newsla, newlen, true);
+ return -EFAULT;
+ }
+
+ memcpy(newbuf, oldbuf, oldlen);
+
+ sla_buffer_unmap(newsla, newbuf);
+ sla_free(oldsla, oldlen, true);
+ *sla = newsla;
+ *len = newlen;
+
+ return 0;
+}
+
+static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
+ struct tsm_dsm_tio *dev_data)
+{
+ int rc;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+
+ if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
+ return -EIO;
+
+ if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
+ int rc1, rc2;
+
+ rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
+ if (rc1 < 0)
+ return rc1;
+
+ rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
+ if (rc2 < 0)
+ return rc2;
+
+ if (!rc1 && !rc2)
+			/* Neither buffer required expansion despite the firmware's request */
+ return -EFAULT;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+ }
+
+ if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ struct spdm_dobj_hdr_req *req_hdr;
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t resp_len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));
+
+ if (!dev_data->cmd) {
+ if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
+ return -EINVAL;
+ if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
+ return -EFAULT;
+ memcpy(dev_data->cmd_data, data, data_len);
+ memset(&dev_data->cmd_data[data_len], 0xFF,
+ sizeof(dev_data->cmd_data) - data_len);
+ dev_data->cmd = cmd;
+ }
+
+ req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ switch (req_hdr->data_type) {
+ case DOBJ_DATA_TYPE_SPDM:
+ rc = PCI_DOE_FEATURE_CMA;
+ break;
+ case DOBJ_DATA_TYPE_SECURE_SPDM:
+ rc = PCI_DOE_FEATURE_SSESSION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ resp_hdr->data_type = req_hdr->data_type;
+ dev_data->spdm.req_len = req_hdr->hdr.length -
+ sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
+ dev_data->spdm.rsp_len = resp_len;
+ } else if (dev_data && dev_data->cmd) {
+ /* For either error or success just stop the bouncing */
+ memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
+ dev_data->cmd = 0;
+ }
+
+ return rc;
+}
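+
+/*
+ * Continue a command that sev_tio_do_cmd() parked while the firmware waits
+ * for SPDM traffic; a sketch of the overall flow:
+ *
+ *	sev_tio_do_cmd() returns PCI_DOE_FEATURE_xxx on SEV_RET_SPDM_REQUEST
+ *	  -> the caller runs pci_doe() with spdm.req/spdm.rsp
+ *	  -> sev_tio_continue() resubmits the saved command
+ *	  -> repeat until the firmware stops requesting SPDM
+ */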
+
+int sev_tio_continue(struct tsm_dsm_tio *dev_data)
+{
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ int ret;
+
+ if (!dev_data || !dev_data->cmd)
+ return -EINVAL;
+
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ dev_data->spdm.rsp_len, 32);
+ dev_data->respbuf->payload_sz = resp_hdr->hdr.length;
+
+ ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
+ &dev_data->psp_ret, dev_data);
+ if (ret)
+ return ret;
+
+ if (dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
+{
+ ctrl->req = dev_data->req;
+ ctrl->resp = dev_data->resp;
+ ctrl->scratch = dev_data->scratch;
+ ctrl->output = dev_data->output;
+}
+
+static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ sizeof(struct sla_buffer_hdr));
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
+ sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
+ spdm->rsp = NULL;
+ spdm->req = NULL;
+ sla_free(dev_data->req, len, true);
+ sla_free(dev_data->resp, len, false);
+ sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);
+
+ dev_data->req.sla = 0;
+ dev_data->resp.sla = 0;
+ dev_data->scratch.sla = 0;
+ dev_data->respbuf = NULL;
+ dev_data->reqbuf = NULL;
+ sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
+}
+
+static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct tsm_spdm *spdm = &dev_data->spdm;
+ int ret;
+
+ dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
+ dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
+ dev_data->scratch_len = tio_status->spdm_scratch_size_max;
+ dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
+ dev_data->output_len = tio_status->spdm_out_size_max;
+ dev_data->output = sla_alloc(dev_data->output_len, true);
+
+ if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
+	    IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->output)) {
+ ret = -ENOMEM;
+ goto free_spdm_exit;
+ }
+
+ dev_data->reqbuf = sla_buffer_map(dev_data->req);
+ dev_data->respbuf = sla_buffer_map(dev_data->resp);
+ if (!dev_data->reqbuf || !dev_data->respbuf) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
+ spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
+ if (!spdm->req || !spdm->rsp) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ dobj_response_init(dev_data->respbuf);
+
+ return 0;
+
+free_spdm_exit:
+ spdm_ctrl_free(dev_data);
+ return ret;
+}
+
+int sev_tio_init_locked(void *tio_status_page)
+{
+ struct sev_tio_status *tio_status = tio_status_page;
+ struct sev_data_tio_status data_status = {
+ .length = sizeof(data_status),
+ };
+ int ret, psp_ret;
+
+ data_status.status_paddr = __psp_pa(tio_status_page);
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
+ tio_status->reserved)
+ return -EFAULT;
+
+ if (!tio_status->tio_en && !tio_status->tio_init_done)
+ return -ENOENT;
+
+ if (tio_status->tio_init_done)
+ return -EBUSY;
+
+ struct sev_data_tio_init ti = { .length = sizeof(ti) };
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
+ if (ret)
+ return ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
+ u16 root_port_id, u8 segment_id)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_create create = {
+ .length = sizeof(create),
+ .device_id = device_id,
+ .root_port_id = root_port_id,
+ .segment_id = segment_id,
+ };
+ void *data_pg;
+ int ret;
+
+ dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return -ENOMEM;
+
+ data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
+ if (!data_pg) {
+ ret = -ENOMEM;
+ goto free_ctx_exit;
+ }
+
+ create.dev_ctx_sla = dev_data->dev_ctx;
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
+ if (ret)
+ goto free_data_pg_exit;
+
+ dev_data->data_pg = data_pg;
+
+ return 0;
+
+free_data_pg_exit:
+ snp_free_firmware_page(data_pg);
+free_ctx_exit:
+ sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
+ return ret;
+}
+
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_reclaim r = {
+ .length = sizeof(r),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ };
+ int ret;
+
+ if (dev_data->data_pg) {
+ snp_free_firmware_page(dev_data->data_pg);
+ dev_data->data_pg = NULL;
+ }
+
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return 0;
+
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);
+
+ sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
+ dev_data->dev_ctx = SLA_NULL;
+
+ spdm_ctrl_free(dev_data);
+
+ return ret;
+}
+
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
+{
+ struct sev_data_tio_dev_connect connect = {
+ .length = sizeof(connect),
+ .tc_mask = tc_mask,
+ .cert_slot = cert_slot,
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .ide_stream_id = {
+ ids[0], ids[1], ids[2], ids[3],
+ ids[4], ids[5], ids[6], ids[7]
+ },
+ };
+ int ret;
+
+ if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+ if (!(tc_mask & 1))
+ return -EINVAL;
+
+ ret = spdm_ctrl_alloc(dev_data);
+ if (ret)
+ return ret;
+
+ spdm_ctrl_init(&connect.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
+{
+ struct sev_data_tio_dev_disconnect dc = {
+ .length = sizeof(dc),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
+ };
+
+ if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+
+ spdm_ctrl_init(&dc.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status);
+ case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init);
+ case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create);
+ case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim);
+ case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect);
+ case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect);
+ default: return 0;
+ }
+}
diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h
new file mode 100644
index 000000000000..67512b3dbc53
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PSP_SEV_TIO_H__
+#define __PSP_SEV_TIO_H__
+
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+#include <linux/tsm.h>
+#include <uapi/linux/psp-sev.h>
+
+struct sla_addr_t {
+ union {
+ u64 sla;
+ struct {
+ u64 page_type :1,
+ page_size :1,
+ reserved1 :10,
+ pfn :40,
+ reserved2 :12;
+ };
+ };
+} __packed;
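+
+/*
+ * Example encoding per the bitfield above: a plain 4K data page at
+ * physical address 0x1234000 (C-bit aside) is page_type = 0 (data),
+ * page_size = 0 (4K), pfn = 0x1234.
+ */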
+
+#define SEV_TIO_MAX_COMMAND_LENGTH 128
+
+/* SPDM control structure for DOE */
+struct tsm_spdm {
+ unsigned long req_len;
+ void *req;
+ unsigned long rsp_len;
+ void *rsp;
+};
+
+/* Describes TIO device */
+struct tsm_dsm_tio {
+ u8 cert_slot;
+ struct sla_addr_t dev_ctx;
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+ size_t output_len;
+ size_t scratch_len;
+ struct tsm_spdm spdm;
+ struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */
+ struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */
+
+ int cmd;
+ int psp_ret;
+ u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH];
+ void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */
+
+#define TIO_IDE_MAX_TC 8
+ struct pci_ide *ide[TIO_IDE_MAX_TC];
+};
+
+/* Describes TSM structure for PF0 pointed by pci_dev->tsm */
+struct tio_dsm {
+ struct pci_tsm_pf0 tsm;
+ struct tsm_dsm_tio data;
+ struct sev_device *sev;
+};
+
+/* Data object IDs */
+#define SPDM_DOBJ_ID_NONE 0
+#define SPDM_DOBJ_ID_REQ 1
+#define SPDM_DOBJ_ID_RESP 2
+
+struct spdm_dobj_hdr {
+ u32 id; /* Data object type identifier */
+ u32 length; /* Length of the data object, INCLUDING THIS HEADER */
+ struct { /* Version of the data object structure */
+ u8 minor;
+ u8 major;
+ } version;
+} __packed;
+
+/**
+ * struct sev_tio_status - TIO_STATUS command's info_paddr buffer
+ *
+ * @length: Length of this structure in bytes
+ * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO
+ * @tio_init_done: Indicates TIO_INIT has been invoked
+ * @spdm_req_size_min: Minimum SPDM request buffer size in bytes
+ * @spdm_req_size_max: Maximum SPDM request buffer size in bytes
+ * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes
+ * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes
+ * @spdm_out_size_min: Minimum SPDM output buffer size in bytes
+ * @spdm_out_size_max: Maximum for the SPDM output buffer size in bytes
+ * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes
+ * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes
+ * @devctx_size: Size of a device context buffer in bytes
+ * @tdictx_size: Size of a TDI context buffer in bytes
+ * @tio_crypto_alg: TIO crypto algorithms supported
+ */
+struct sev_tio_status {
+ u32 length;
+ u32 tio_en :1,
+ tio_init_done :1,
+ reserved :30;
+ u32 spdm_req_size_min;
+ u32 spdm_req_size_max;
+ u32 spdm_scratch_size_min;
+ u32 spdm_scratch_size_max;
+ u32 spdm_out_size_min;
+ u32 spdm_out_size_max;
+ u32 spdm_rsp_size_min;
+ u32 spdm_rsp_size_max;
+ u32 devctx_size;
+ u32 tdictx_size;
+ u32 tio_crypto_alg;
+ u8 reserved2[12];
+} __packed;
+
+int sev_tio_init_locked(void *tio_status_page);
+int sev_tio_continue(struct tsm_dsm_tio *dev_data);
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id,
+ u8 segment_id);
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot);
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force);
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data);
+
+#endif /* __PSP_SEV_TIO_H__ */
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
new file mode 100644
index 000000000000..ea29cd5d0ff9
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to CCP/SEV-TIO for generic PCIe TDISP module
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/tsm.h>
+#include <linux/iommu.h>
+#include <linux/pci-doe.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+MODULE_IMPORT_NS("PCI_IDE");
+
+#define TIO_DEFAULT_NR_IDE_STREAMS 1
+
+static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
+module_param_named(ide_nr, nr_ide_streams, uint, 0644);
+MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
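+
+/* This file is built into ccp.ko, so e.g. "ccp.ide_nr=4" on the kernel command line */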
+
+#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
+#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
+#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
+#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)
+
+#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))
+
+static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
+{
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ /* Check the main command handler response before entering the loop */
+ if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ if (ret <= 0)
+ return ret;
+
+ /* ret > 0 means "SPDM requested" */
+ while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
+ ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
+ spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
+ if (ret < 0)
+ break;
+
+ WARN_ON_ONCE(ret == 0); /* The response should never be empty */
+ spdm->rsp_len = ret;
+ ret = sev_tio_continue(dev_data);
+ }
+
+ return ret;
+}
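+
+/*
+ * Typical call pattern, as used by dsm_connect()/dsm_disconnect() below:
+ *
+ *	ret = sev_tio_dev_connect(dev_data, tc_mask, ids, cert_slot);
+ *	ret = sev_tio_spdm_cmd(dsm, ret);
+ *
+ * i.e. every raw command result is post-processed here so that firmware
+ * requests for SPDM traffic are serviced before returning to the caller.
+ */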
+
+static int stream_enable(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+ int ret;
+
+ ret = pci_ide_stream_enable(rp, ide);
+ if (ret)
+ return ret;
+
+ ret = pci_ide_stream_enable(ide->pdev, ide);
+ if (ret)
+ pci_ide_stream_disable(rp, ide);
+
+ return ret;
+}
+
+static int streams_enable(struct pci_ide **ide)
+{
+ int ret = 0;
+
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = stream_enable(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void stream_disable(struct pci_ide *ide)
+{
+ pci_ide_stream_disable(ide->pdev, ide);
+ pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_disable(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ stream_disable(ide[i]);
+}
+
+static void stream_setup(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ ide->partner[PCI_IDE_EP].rid_start = 0;
+ ide->partner[PCI_IDE_EP].rid_end = 0xffff;
+ ide->partner[PCI_IDE_RP].rid_start = 0;
+ ide->partner[PCI_IDE_RP].rid_end = 0xffff;
+
+ ide->pdev->ide_cfg = 0;
+ ide->pdev->ide_tee_limit = 1;
+ rp->ide_cfg = 1;
+ rp->ide_tee_limit = 0;
+
+	pci_warn(ide->pdev, "Forcing CFG/TEE for %s\n", pci_name(rp));
+ pci_ide_stream_setup(ide->pdev, ide);
+ pci_ide_stream_setup(rp, ide);
+}
+
+static u8 streams_setup(struct pci_ide **ide, u8 *ids)
+{
+ bool def = false;
+ u8 tc_mask = 0;
+ int i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (!ide[i]) {
+ ids[i] = 0xFF;
+ continue;
+ }
+
+ tc_mask |= BIT(i);
+ ids[i] = ide[i]->stream_id;
+
+ if (!def) {
+ struct pci_ide_partner *settings;
+
+ settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
+ settings->default_stream = 1;
+ def = true;
+ }
+
+ stream_setup(ide[i]);
+ }
+
+ return tc_mask;
+}
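+
+/*
+ * Example: with only the TC0 stream allocated, streams_setup() returns
+ * tc_mask = 0x01 and ids = { 0x00, 0xFF, 0xFF, ... } (stream id 0 for
+ * TC0, 0xFF for unused classes), matching the stream_alloc() policy of
+ * stream_id == tc.
+ */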
+
+static int streams_register(struct pci_ide **ide)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = pci_ide_stream_register(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void streams_unregister(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ pci_ide_stream_unregister(ide[i]);
+}
+
+static void stream_teardown(struct pci_ide *ide)
+{
+ pci_ide_stream_teardown(ide->pdev, ide);
+ pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_teardown(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ stream_teardown(ide[i]);
+ pci_ide_stream_free(ide[i]);
+ ide[i] = NULL;
+ }
+ }
+}
+
+static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
+ unsigned int tc)
+{
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_ide *ide1;
+
+ if (ide[tc]) {
+		pci_err(pdev, "Stream for class=%d already registered\n", tc);
+ return -EBUSY;
+ }
+
+ /* FIXME: find a better way */
+ if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
+		pci_notice(pdev, "Enabling non-default %d streams\n", nr_ide_streams);
+ pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
+
+ ide1 = pci_ide_stream_alloc(pdev);
+ if (!ide1)
+		return -ENOMEM;
+
+ /* Blindly assign streamid=0 to TC=0, and so on */
+ ide1->stream_id = tc;
+
+ ide[tc] = ide1;
+
+ return 0;
+}
+
+static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
+{
+ struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
+ int rc;
+
+ if (!dsm)
+ return NULL;
+
+ rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
+ if (rc)
+ return NULL;
+
+ pci_dbg(pdev, "TSM enabled\n");
+ dsm->sev = sev;
+ return &no_free_ptr(dsm)->tsm.base_tsm;
+}
+
+static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
+{
+ struct sev_device *sev = tsm_dev_to_sev(tsmdev);
+
+ if (is_pci_tsm_pf0(pdev))
+ return tio_pf0_probe(pdev, sev);
+	return NULL;
+}
+
+static void dsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev = tsm->pdev;
+
+ pci_dbg(pdev, "TSM disabled\n");
+
+ if (is_pci_tsm_pf0(pdev)) {
+ struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
+
+ pci_tsm_pf0_destructor(&dsm->tsm);
+ kfree(dsm);
+ }
+}
+
+static int dsm_create(struct tio_dsm *dsm)
+{
+ struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
+ u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
+ struct pci_dev *rootport = pcie_find_root_port(pdev);
+ u16 device_id = pci_dev_id(pdev);
+ u16 root_port_id;
+ u32 lnkcap = 0;
+
+ if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENODEV;
+
+ root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+
+ return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
+}
+
+static int dsm_connect(struct pci_dev *pdev)
+{
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ u8 ids[TIO_IDE_MAX_TC];
+ u8 tc_mask;
+ int ret;
+
+ if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
+ pci_err(pdev, "CMA DOE MB must support SSESSION\n");
+ return -EFAULT;
+ }
+
+ ret = stream_alloc(pdev, dev_data->ide, 0);
+ if (ret)
+ return ret;
+
+ ret = dsm_create(dsm);
+ if (ret)
+ goto ide_free_exit;
+
+ tc_mask = streams_setup(dev_data->ide, ids);
+
+ ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret)
+ goto free_exit;
+
+	ret = streams_enable(dev_data->ide);
+	if (ret)
+		goto free_exit;
+
+ ret = streams_register(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ide_free_exit:
+	streams_teardown(dev_data->ide);
+
+ return ret;
+}
+
+static void dsm_disconnect(struct pci_dev *pdev)
+{
+ bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ int ret;
+
+ ret = sev_tio_dev_disconnect(dev_data, force);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret && !force) {
+ ret = sev_tio_dev_disconnect(dev_data, true);
+ sev_tio_spdm_cmd(dsm, ret);
+ }
+
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ streams_unregister(dev_data->ide);
+ streams_teardown(dev_data->ide);
+}
+
+static struct pci_tsm_ops sev_tsm_ops = {
+ .probe = dsm_probe,
+ .remove = dsm_remove,
+ .connect = dsm_connect,
+ .disconnect = dsm_disconnect,
+};
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+{
+ struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct tsm_dev *tsmdev;
+ int ret;
+
+ WARN_ON(sev->tio_status);
+
+ if (!t)
+ return;
+
+ ret = sev_tio_init_locked(tio_status_page);
+ if (ret) {
+ pr_warn("SEV-TIO STATUS failed with %d\n", ret);
+ goto error_exit;
+ }
+
+	tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
+	if (IS_ERR(tsmdev)) {
+		ret = PTR_ERR(tsmdev);
+		goto error_exit;
+	}
+
+ memcpy(t, tio_status_page, sizeof(*t));
+
+ pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
+ "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
+ t->tio_en, t->tio_init_done,
+ t->spdm_req_size_min, t->spdm_req_size_max,
+ t->spdm_rsp_size_min, t->spdm_rsp_size_max,
+ t->spdm_scratch_size_min, t->spdm_scratch_size_max,
+ t->spdm_out_size_min, t->spdm_out_size_max,
+ t->devctx_size, t->tdictx_size,
+ t->tio_crypto_alg);
+
+ sev->tsmdev = tsmdev;
+ sev->tio_status = t;
+
+ return;
+
+error_exit:
+	pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+	       ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
+	kfree(t);
+}
+
+void sev_tsm_uninit(struct sev_device *sev)
+{
+ if (sev->tsmdev)
+ tsm_unregister(sev->tsmdev);
+
+ sev->tsmdev = NULL;
+}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index b28a6f50daaa..956ea609d0cc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -75,6 +75,14 @@ static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
+#if IS_ENABLED(CONFIG_PCI_TSM)
+static bool sev_tio_enabled = true;
+module_param_named(tio, sev_tio_enabled, bool, 0444);
+MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
+#else
+static const bool sev_tio_enabled = false;
+#endif
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -251,7 +259,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
- default: return 0;
+ default: return sev_tio_cmd_buffer_len(cmd);
}
return 0;
@@ -380,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
-/*
- * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
- * needs snp_reclaim_pages(), so a forward declaration is needed.
- */
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
-
-static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
{
int ret, err, i;
@@ -420,6 +422,7 @@ cleanup:
snp_leak_pages(__phys_to_pfn(paddr), npages - i);
return ret;
}
+EXPORT_SYMBOL_GPL(snp_reclaim_pages);
static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
{
@@ -850,7 +853,7 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
return 0;
}
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
@@ -1392,6 +1395,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
*
*/
if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
+
/*
* Firmware checks that the pages containing the ranges enumerated
* in the RANGES structure are either in the default page state or in the
@@ -1432,6 +1437,17 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
data.init_rmp = 1;
data.list_paddr_en = 1;
data.list_paddr = __psp_pa(snp_range_list);
+
+ data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
+
+ /*
+ * When psp_init_on_probe is disabled, the userspace calling
+ * SEV ioctl can inadvertently shut down SNP and SEV-TIO causing
+ * unexpected state loss.
+ */
+ if (data.tio_en && !psp_init_on_probe)
+			dev_warn(sev->dev, "SEV-TIO is incompatible with psp_init_on_probe=0\n");
+
cmd = SEV_CMD_SNP_INIT_EX;
} else {
cmd = SEV_CMD_SNP_INIT;
@@ -1469,7 +1485,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
snp_hv_fixed_pages_state_update(sev, HV_FIXED);
sev->snp_initialized = true;
- dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
+ data.tio_en ? "enabled" : "disabled");
dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
sev->api_minor, sev->build);
@@ -1477,6 +1494,23 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
atomic_notifier_chain_register(&panic_notifier_list,
&snp_panic_notifier);
+ if (data.tio_en) {
+		/*
+		 * This executes with sev_cmd_mutex held, so a
+		 * snp_reclaim_pages(locked=false) further down the stack
+		 * (extremely unlikely, but possible) would deadlock.
+		 * Instead of exporting __snp_alloc_firmware_pages(), allocate
+		 * a page for this one call here.
+		 */
+		struct page *tio_status_pg = __snp_alloc_firmware_pages(
+			GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true);
+
+		if (tio_status_pg) {
+			sev_tsm_init_locked(sev, page_address(tio_status_pg));
+			__snp_free_firmware_pages(tio_status_pg, 0, true);
+		}
+ }
+
sev_es_tmr_size = SNP_TMR_SIZE;
return 0;
@@ -2756,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
static void sev_firmware_shutdown(struct sev_device *sev)
{
+ /*
+	 * Call this without sev_cmd_mutex held: TSM will likely disconnect
+	 * IDE, which ends up in sev_do_cmd() and that takes sev_cmd_mutex.
+ */
+ if (sev->tio_status)
+ sev_tsm_uninit(sev);
+
mutex_lock(&sev_cmd_mutex);
+
__sev_firmware_shutdown(sev, false);
+
+ kfree(sev->tio_status);
+ sev->tio_status = NULL;
+
mutex_unlock(&sev_cmd_mutex);
}
@@ -2770,6 +2816,43 @@ void sev_platform_shutdown(void)
}
EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+u64 sev_get_snp_policy_bits(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ u64 policy_bits;
+
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ return 0;
+
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ policy_bits = SNP_POLICY_MASK_BASE;
+
+ if (sev->snp_plat_status.feature_info) {
+ if (sev->snp_feat_info_0.ecx & SNP_RAPL_DISABLE_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_RAPL_DIS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM;
+
+ if (sev->snp_feat_info_0.ecx & SNP_AES_256_XTS_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_MEM_AES_256_XTS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CXL_ALLOW_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CXL_ALLOW;
+
+ if (sev_version_greater_or_equal(1, 58))
+ policy_bits |= SNP_POLICY_MASK_PAGE_SWAP_DISABLE;
+ }
+
+ return policy_bits;
+}
+EXPORT_SYMBOL_GPL(sev_get_snp_policy_bits);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index ac03bd0848f7..b1cd556bbbf6 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -34,6 +34,8 @@ struct sev_misc_dev {
struct miscdevice misc;
};
+struct sev_tio_status;
+
struct sev_device {
struct device *dev;
struct psp_device *psp;
@@ -61,15 +63,24 @@ struct sev_device {
struct sev_user_data_snp_status snp_plat_status;
struct snp_feature_info snp_feat_info_0;
+
+ struct tsm_dev *tsmdev;
+ struct sev_tio_status *tio_status;
};
int sev_dev_init(struct psp_device *psp);
void sev_dev_destroy(struct psp_device *psp);
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
void sev_pci_init(void);
void sev_pci_exit(void);
struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
void snp_free_hv_fixed_pages(struct page *page);
+#if IS_ENABLED(CONFIG_PCI_TSM)
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
+void sev_tsm_uninit(struct sev_device *sev);
+int sev_tio_cmd_buffer_len(int cmd);
+#else
+static inline void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page) {}
+static inline void sev_tsm_uninit(struct sev_device *sev) {}
+static inline int sev_tio_cmd_buffer_len(int cmd) { return 0; }
+#endif
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
index 69295a9ddf0a..4ccc94ed9493 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -1,18 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sprintf.h>
#include <linux/string_helpers.h>
#include "adf_pm_dbgfs_utils.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
#define PM_INFO_MAX_KEY_LEN 21
static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 82d229c8f9bf..ae899f68551f 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -236,7 +236,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return -ENXIO;
}
- cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+ if (!cxlr->params.res)
+ return -ENXIO;
+ cpu_cache_invalidate_memregion(cxlr->params.res->start,
+ resource_size(cxlr->params.res));
return 0;
}
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 8d4ddaa85ae8..eaab6af143e1 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -44,6 +44,7 @@
* but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -139,9 +140,6 @@
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-/* Non-constant mask variant of FIELD_GET() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
index 6125cccc9ba7..a68d38f89254 100644
--- a/drivers/firmware/imx/imx-scu-irq.c
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -203,6 +203,18 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
struct mbox_chan *ch;
int ret = 0, i = 0;
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec)) {
+ i = of_alias_get_id(spec.np, "mu");
+ of_node_put(spec.np);
+ }
+
+	/* Fall back to mu1 as the general MU IRQ channel if the alias lookup failed */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
if (ret)
return ret;
@@ -214,27 +226,16 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
cl->dev = dev;
cl->rx_callback = imx_scu_irq_callback;
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
/* SCU general IRQ uses general interrupt channel 3 */
ch = mbox_request_channel_byname(cl, "gip3");
if (IS_ERR(ch)) {
ret = PTR_ERR(ch);
dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
- devm_kfree(dev, cl);
- return ret;
+ goto free_cl;
}
- INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
-
- if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
- "#mbox-cells", 0, &spec))
- i = of_alias_get_id(spec.np, "mu");
-
- /* use mu1 as general mu irq channel if failed */
- if (i < 0)
- i = 1;
-
- mu_resource_id = IMX_SC_R_MU_0A + i;
-
/* Create directory under /sysfs/firmware */
wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj);
if (!wakeup_obj) {
@@ -253,7 +254,8 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
free_ch:
mbox_free_channel(ch);
+free_cl:
+ devm_kfree(dev, cl);
return ret;
}
-EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 8c28e25ddc8a..67b267a7408a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -73,9 +73,9 @@ static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
-EACCES, /* IMX_SC_ERR_NOACCESS */
-EACCES, /* IMX_SC_ERR_LOCKED */
-ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
- -EEXIST, /* IMX_SC_ERR_NOTFOUND */
- -EPERM, /* IMX_SC_ERR_NOPOWER */
- -EPIPE, /* IMX_SC_ERR_IPC */
+ -ENOENT, /* IMX_SC_ERR_NOTFOUND */
+ -ENODEV, /* IMX_SC_ERR_NOPOWER */
+ -ECOMM, /* IMX_SC_ERR_IPC */
-EBUSY, /* IMX_SC_ERR_BUSY */
-EIO, /* IMX_SC_ERR_FAIL */
};
@@ -324,7 +324,9 @@ static int imx_scu_probe(struct platform_device *pdev)
}
sc_ipc->dev = dev;
- mutex_init(&sc_ipc->lock);
+ ret = devm_mutex_init(dev, &sc_ipc->lock);
+ if (ret)
+ return ret;
init_completion(&sc_ipc->done);
imx_sc_ipc_handle = sc_ipc;
@@ -352,6 +354,7 @@ static struct platform_driver imx_scu_driver = {
.driver = {
.name = "imx-scu",
.of_match_table = imx_scu_match,
+ .suppress_bind_attrs = true,
},
.probe = imx_scu_probe,
};
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 49fd2ae01055..e027a2bd8f26 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -398,6 +398,9 @@ static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
struct ti_sci_xfer *xfer)
{
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED |
+ TI_SCI_FLAG_REQ_ACK_ON_RECEIVED));
int ret;
int timeout;
struct device *dev = info->dev;
@@ -409,12 +412,12 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
ret = 0;
- if (system_state <= SYSTEM_RUNNING) {
+ if (response_expected && system_state <= SYSTEM_RUNNING) {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout))
ret = -ETIMEDOUT;
- } else {
+ } else if (response_expected) {
/*
* If we are !running, we cannot use wait_for_completion_timeout
* during noirq phase, so we must manually poll the completion.
@@ -1670,6 +1673,9 @@ fail:
static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
{
+ u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ?
+ TI_SCI_FLAG_REQ_GENERIC_NORESPONSE :
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
struct ti_sci_info *info;
struct ti_sci_msg_req_prepare_sleep *req;
struct ti_sci_msg_hdr *resp;
@@ -1686,7 +1692,7 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
- TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ msg_flags,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
@@ -1706,11 +1712,12 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
goto fail;
}
- resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
-
- if (!ti_sci_is_response_ack(resp)) {
- dev_err(dev, "Failed to prepare sleep\n");
- ret = -ENODEV;
+ if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) {
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to prepare sleep\n");
+ ret = -ENODEV;
+ }
}
fail:
@@ -3664,6 +3671,78 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
+/*
+ * Iterate all device nodes that have a wakeup-source property and check if one
+ * of the possible phandles points to a Partial-IO system state. If one does,
+ * resolve the device node to an actual device and check if wakeup is enabled
+ * (see the example fragment after this function).
+ */
+static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info)
+{
+ struct device_node *wakeup_node = NULL;
+
+ for_each_node_with_property(wakeup_node, "wakeup-source") {
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) {
+ struct platform_device *pdev;
+ bool may_wakeup;
+
+ /*
+ * Continue if idle-state-name is not off-wake. Return
+ * value is the index of the string which should be 0 if
+ * off-wake is present.
+ */
+ if (of_property_match_string(it.node, "idle-state-name", "off-wake"))
+ continue;
+
+ pdev = of_find_device_by_node(wakeup_node);
+ if (!pdev)
+ continue;
+
+ may_wakeup = device_may_wakeup(&pdev->dev);
+ put_device(&pdev->dev);
+
+ if (may_wakeup) {
+ dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n",
+ wakeup_node);
+ of_node_put(it.node);
+ of_node_put(wakeup_node);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
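+
+/*
+ * Hypothetical device-tree fragment that the lookup above would match;
+ * the node names are illustrative only:
+ *
+ *	wkup_uart0: serial@2b300000 {
+ *		wakeup-source = <&off_wake_state>;
+ *	};
+ *
+ *	off_wake_state: idle-state {
+ *		idle-state-name = "off-wake";
+ *	};
+ */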
+
+static int ti_sci_sys_off_handler(struct sys_off_data *data)
+{
+ struct ti_sci_info *info = data->cb_data;
+ const struct ti_sci_handle *handle = &info->handle;
+ bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info);
+ int ret;
+
+ if (!enter_partial_io)
+ return NOTIFY_DONE;
+
+ dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n");
+
+ ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0);
+ if (ret) {
+ dev_err(info->dev,
+ "Failed to enter Partial-IO %pe, trying to do an emergency restart\n",
+ ERR_PTR(ret));
+ emergency_restart();
+ }
+
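+	/* The firmware should power the system down; if not, restart after 5s */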
+ mdelay(5000);
+ emergency_restart();
+
+ return NOTIFY_DONE;
+}
+
static int tisci_reboot_handler(struct sys_off_data *data)
{
struct ti_sci_info *info = data->cb_data;
@@ -3706,7 +3785,7 @@ static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
}
}
-static int __maybe_unused ti_sci_suspend(struct device *dev)
+static int ti_sci_suspend(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL;
@@ -3746,19 +3825,21 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
+static int ti_sci_suspend_noirq(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0;
- ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
- if (ret)
- return ret;
+ if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
+ if (ret)
+ return ret;
+ }
return 0;
}
-static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
+static int ti_sci_resume_noirq(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0;
@@ -3767,9 +3848,11 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
u8 pin;
u8 mode;
- ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
- if (ret)
- return ret;
+ if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
+ if (ret)
+ return ret;
+ }
ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
/* Do not fail to resume on error as the wake reason is not critical */
@@ -3780,7 +3863,7 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
return 0;
}
-static void __maybe_unused ti_sci_pm_complete(struct device *dev)
+static void ti_sci_pm_complete(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
@@ -3791,12 +3874,10 @@ static void __maybe_unused ti_sci_pm_complete(struct device *dev)
}
static const struct dev_pm_ops ti_sci_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = ti_sci_suspend,
- .suspend_noirq = ti_sci_suspend_noirq,
- .resume_noirq = ti_sci_resume_noirq,
- .complete = ti_sci_pm_complete,
-#endif
+ .suspend = pm_sleep_ptr(ti_sci_suspend),
+ .suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq),
+ .resume_noirq = pm_sleep_ptr(ti_sci_resume_noirq),
+ .complete = pm_sleep_ptr(ti_sci_pm_complete),
};
/* Description for K2G */
@@ -3928,11 +4009,12 @@ static int ti_sci_probe(struct platform_device *pdev)
}
ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
- dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n",
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n",
info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
- info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : ""
+ info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "",
+ info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : ""
);
ti_sci_setup_ops(info);
@@ -3943,6 +4025,19 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out;
}
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) {
+ ret = devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ ti_sci_sys_off_handler,
+ info);
+ if (ret) {
+ dev_err(dev, "Failed to register sys_off_handler %pe\n",
+ ERR_PTR(ret));
+ goto out;
+ }
+ }
+
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
info->handle.version.abi_major, info->handle.version.abi_minor,
info->handle.version.firmware_revision,
@@ -3952,7 +4047,13 @@ static int ti_sci_probe(struct platform_device *pdev)
list_add_tail(&info->node, &ti_sci_list);
mutex_unlock(&ti_sci_list_mutex);
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret));
+ goto out;
+ }
+ return 0;
+
out:
if (!IS_ERR(info->chan_tx))
mbox_free_channel(info->chan_tx);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 701c416b2e78..91f234550c43 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -149,6 +149,7 @@ struct ti_sci_msg_req_reboot {
* MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
* MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
* MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
+ * MSG_FLAG_CAPS_IO_ISOLATION: IO Isolation support
*
* Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
* providing currently available SOC/firmware capabilities. SoC that don't
@@ -160,6 +161,7 @@ struct ti_sci_msg_resp_query_fw_caps {
#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
#define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CAPS_IO_ISOLATION TI_SCI_MSG_FLAG(7)
#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
u64 fw_caps;
} __packed;
@@ -595,6 +597,11 @@ struct ti_sci_msg_resp_get_clock_freq {
struct ti_sci_msg_req_prepare_sleep {
struct ti_sci_msg_hdr hdr;
+/*
+ * When sending prepare_sleep with MODE_PARTIAL_IO no response will be sent,
+ * no further steps are required.
+ */
+#define TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO 0x03
#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
u8 mode;
u32 ctx_lo;
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..70f8f02f14a3 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 22853ae0efdf..36efb827f3da 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -3,6 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer for debugfs APIs
*
* Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -38,6 +39,7 @@ static struct pm_api_info pm_api_list[] = {
PM_API(PM_RELEASE_NODE),
PM_API(PM_SET_REQUIREMENT),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_GET_NODE_STATUS),
PM_API(PM_REGISTER_NOTIFIER),
PM_API(PM_RESET_ASSERT),
PM_API(PM_RESET_GET_STATUS),
@@ -167,6 +169,17 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
pm_api_arg[3] ? pm_api_arg[3] :
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
break;
+ case PM_GET_NODE_STATUS:
+ ret = zynqmp_pm_get_node_status(pm_api_arg[0],
+ &pm_api_ret[0],
+ &pm_api_ret[1],
+ &pm_api_ret[2]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n",
+ pm_api_arg[0], pm_api_ret[0],
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
case PM_REGISTER_NOTIFIER:
ret = zynqmp_pm_register_notifier(pm_api_arg[0],
pm_api_arg[1] ?
diff --git a/drivers/firmware/xilinx/zynqmp-ufs.c b/drivers/firmware/xilinx/zynqmp-ufs.c
new file mode 100644
index 000000000000..85da8a822f3a
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ufs.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Firmware Layer for UFS APIs
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/module.h>
+
+/* Register Node IDs */
+#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */
+#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */
+
+/* Register Offsets for PMC IOU SLCR */
+#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */
+#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */
+
+/* Masks for SRAM Control and Status Register */
+#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */
+#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */
+#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */
+
+/* Mask to check M-PHY TX-RX configuration readiness */
+#define TX_RX_CFG_RDY_MASK GENMASK(3, 0)
+
+/* Register Offsets for EFUSE Cache */
+#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */
+
+/**
+ * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness
+ * @is_ready: Pointer used to return the readiness status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_ready)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+ regval &= TX_RX_CFG_RDY_MASK;
+ if (regval)
+ *is_ready = true;
+ else
+ *is_ready = false;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready);
+
+/**
+ * zynqmp_pm_is_sram_init_done - check SRAM initialization
+ * @is_done: Pointer used to return the initialization status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_done)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+ regval &= SRAM_CSR_INIT_DONE_MASK;
+ if (regval)
+ *is_done = true;
+ else
+ *is_done = false;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done);
+
+/**
+ * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sram_bypass(void)
+{
+ u32 sram_csr;
+ int ret;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr);
+ if (ret)
+ return ret;
+
+ sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
+ sram_csr |= SRAM_CSR_BYPASS_MASK;
+
+ return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET,
+ GENMASK(2, 1), sram_csr);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass);
+
+/**
+ * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values
+ * @val: Pointer used to return the calibration value
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);
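
The intended consumer of these helpers is the AMD/Xilinx UFS host controller driver. A hedged sketch of how a host init path might sequence them; the function name, polling cadence, and timeout are assumptions for illustration:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware/xlnx-zynqmp.h>

/* Hedged sketch of a UFS host init sequence using the helpers above. */
static int versal_ufs_phy_init_example(struct device *dev)
{
	bool ready = false;
	int ret, retries = 100;
	u32 cal;

	/* M-PHY lanes must report config-ready before touching the PHY. */
	while (retries--) {
		ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
		if (ret)
			return ret;
		if (ready)
			break;
		usleep_range(100, 200);
	}
	if (!ready) {
		dev_warn(dev, "M-PHY config-ready timed out\n");
		return -ETIMEDOUT;
	}

	/* Fall back to SRAM bypass if the ROM did not initialize the SRAM. */
	ret = zynqmp_pm_is_sram_init_done(&ready);
	if (ret)
		return ret;
	if (!ready) {
		ret = zynqmp_pm_set_sram_bypass();
		if (ret)
			return ret;
	}

	return zynqmp_pm_get_ufs_calibration_values(&cal);
}
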
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 02da3e48bc8f..ad811f40e059 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -72,6 +72,15 @@ struct pm_api_feature_data {
struct hlist_node hentry;
};
+struct platform_fw_data {
+ /*
+ * Family code for platform.
+ */
+ const u32 family_code;
+};
+
+static struct platform_fw_data *active_platform_fw_data;
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -464,8 +473,6 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
static u32 pm_api_version;
static u32 pm_tz_version;
-static u32 pm_family_code;
-static u32 pm_sub_family_code;
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
{
@@ -532,32 +539,18 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_family_info() - Get family info of platform
* @family: Returned family code value
- * @subfamily: Returned sub-family code value
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+int zynqmp_pm_get_family_info(u32 *family)
{
- u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 idcode;
- int ret;
+ if (!active_platform_fw_data)
+ return -ENODEV;
- /* Check is family or sub-family code already received */
- if (pm_family_code && pm_sub_family_code) {
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
- return 0;
- }
-
- ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
- if (ret < 0)
- return ret;
+ if (!family)
+ return -EINVAL;
- idcode = ret_payload[1];
- pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode);
- pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode);
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
+ *family = active_platform_fw_data->family_code;
return 0;
}
@@ -1238,8 +1231,13 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value)
{
int ret;
+ u32 pm_family_code;
+
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
+ if (ret)
+ return ret;
- if (pm_family_code == ZYNQMP_FAMILY_CODE &&
+ if (pm_family_code == PM_ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
@@ -1414,6 +1412,45 @@ int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
/**
+ * zynqmp_pm_get_node_status - PM call to request a node's current power state
+ * @node: ID of the component or sub-system in question
+ * @status: Current operating state of the requested node
+ * @requirements: Current requirements asserted on the node,
+ * used for slave nodes only.
+ * @usage: Usage information, used for slave nodes only:
+ * PM_USAGE_NO_MASTER - No master is currently using
+ * the node
+ * PM_USAGE_CURRENT_MASTER - Only requesting master is
+ * currently using the node
+ * PM_USAGE_OTHER_MASTER - Only other masters are
+ * currently using the node
+ * PM_USAGE_BOTH_MASTERS - Both the current and at least
+ * one other master is currently
+ * using the node
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status || !requirements || !usage)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, ret_payload, 1, node);
+ if (ret_payload[0] == XST_PM_SUCCESS) {
+ *status = ret_payload[1];
+ *requirements = ret_payload[2];
+ *usage = ret_payload[3];
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_node_status);
+
+/**
* zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to
* be powered down forcefully
* @node: Node ID of the targeted PU or subsystem
@@ -1617,6 +1654,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
}
/**
+ * zynqmp_pm_sec_read_reg - PM call to securely read from given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @ret_value: Output data read from the given offset after
+ * firmware access policy is successfully enforced
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 count = 1;
+ int ret;
+
+ if (!ret_value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG,
+ offset, count);
+
+ *ret_value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg);
+
+/**
+ * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @mask: Mask to be used
+ * @value: Value to be written
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG,
+ offset, mask, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg);
+
+/**
* zynqmp_pm_set_sd_config - PM call to set value of SD config registers
* @node: SD node ID
* @config: The config type of SD registers
@@ -2007,12 +2090,18 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zynqmp_devinfo *devinfo;
+ u32 pm_family_code;
int ret;
ret = get_set_conduit_method(dev->of_node);
if (ret)
return ret;
+ /* Get platform-specific firmware data from device tree match */
+ active_platform_fw_data = (struct platform_fw_data *)device_get_match_data(dev);
+ if (!active_platform_fw_data)
+ return -EINVAL;
+
/* Get SiP SVC version number */
ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
if (ret)
@@ -2045,8 +2134,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Platform Management API v%d.%d\n", __func__,
pm_api_version >> 16, pm_api_version & 0xFFFF);
- /* Get the Family code and sub family code of platform */
- ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ /* Get the Family code of platform */
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
if (ret < 0)
return ret;
@@ -2073,7 +2162,7 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
- if (pm_family_code == VERSAL_FAMILY_CODE) {
+ if (pm_family_code != PM_ZYNQMP_FAMILY_CODE) {
em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
-1, NULL, 0);
if (IS_ERR(em_dev))
@@ -2113,9 +2202,22 @@ static void zynqmp_firmware_sync_state(struct device *dev)
dev_warn(dev, "failed to release power management to firmware\n");
}
+static const struct platform_fw_data platform_fw_data_versal = {
+ .family_code = PM_VERSAL_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_versal_net = {
+ .family_code = PM_VERSAL_NET_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_zynqmp = {
+ .family_code = PM_ZYNQMP_FAMILY_CODE,
+};
+
static const struct of_device_id zynqmp_firmware_of_match[] = {
- {.compatible = "xlnx,zynqmp-firmware"},
- {.compatible = "xlnx,versal-firmware"},
+ {.compatible = "xlnx,zynqmp-firmware", .data = &platform_fw_data_zynqmp},
+ {.compatible = "xlnx,versal-firmware", .data = &platform_fw_data_versal},
+ {.compatible = "xlnx,versal-net-firmware", .data = &platform_fw_data_versal_net},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
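
Since the family code now comes from device-tree match data rather than a PM_GET_CHIPID call, supporting a future platform becomes a table edit instead of a firmware round trip. A hedged sketch; the compatible string and code value are invented for illustration:

/* Hedged sketch: wiring up a hypothetical future platform. */
static const struct platform_fw_data platform_fw_data_example = {
	.family_code = 0x7fu,	/* invented value, not a real family code */
};

/* ...plus one more row in zynqmp_firmware_of_match: */
/* {.compatible = "xlnx,example-firmware", .data = &platform_fw_data_example}, */
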
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2e0ae953dd99..cbdf781994dc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
@@ -30,10 +31,6 @@
*/
#include <linux/gpio/consumer.h>
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define GPIO_G7_IRQ_STS_BASE 0x100
#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
#define GPIO_G7_CTRL_REG_BASE 0x180
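
The removed macros duplicated the non-constant field_get()/field_prep() helpers that <linux/bitfield.h> now provides; unlike FIELD_GET()/FIELD_PREP(), those accept masks that are not compile-time constants. A short sketch of the difference:

#include <linux/bitfield.h>

/* Sketch: extracting a field whose mask is only known at runtime. */
static u32 get_runtime_field(u32 reg, u32 mask)
{
	/*
	 * FIELD_GET(mask, reg) would fail to build here because it
	 * requires a constant-expression mask; field_get() does not.
	 */
	return field_get(mask, reg);
}
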
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 52060b3ec745..d7666fe9dbf8 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -667,7 +667,7 @@ static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
RUNTIME_PM_OPS(mxc_gpio_runtime_suspend, mxc_gpio_runtime_resume, NULL)
};
-static int mxc_gpio_syscore_suspend(void)
+static int mxc_gpio_syscore_suspend(void *data)
{
struct mxc_gpio_port *port;
int ret;
@@ -684,7 +684,7 @@ static int mxc_gpio_syscore_suspend(void)
return 0;
}
-static void mxc_gpio_syscore_resume(void)
+static void mxc_gpio_syscore_resume(void *data)
{
struct mxc_gpio_port *port;
int ret;
@@ -701,11 +701,15 @@ static void mxc_gpio_syscore_resume(void)
}
}
-static struct syscore_ops mxc_gpio_syscore_ops = {
+static const struct syscore_ops mxc_gpio_syscore_ops = {
.suspend = mxc_gpio_syscore_suspend,
.resume = mxc_gpio_syscore_resume,
};
+static struct syscore mxc_gpio_syscore = {
+ .ops = &mxc_gpio_syscore_ops,
+};
+
static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
@@ -718,7 +722,7 @@ static struct platform_driver mxc_gpio_driver = {
static int __init gpio_mxc_init(void)
{
- register_syscore_ops(&mxc_gpio_syscore_ops);
+ register_syscore(&mxc_gpio_syscore);
return platform_driver_register(&mxc_gpio_driver);
}
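
This is the tree-wide pattern of the series: each callback gains a void *data context argument, the ops table becomes const, and registration takes a struct syscore wrapper instead of the ops directly. A minimal sketch of a conversion, grounded in the hunks above; the data pointer is unused here, as in these drivers, and how context gets attached to struct syscore is not shown in these hunks:

/* Sketch of the new-style syscore registration. */
static int foo_suspend(void *data)
{
	/* save hardware state; data is unused by this driver */
	return 0;
}

static void foo_resume(void *data)
{
	/* restore hardware state */
}

static const struct syscore_ops foo_syscore_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct syscore foo_syscore = {
	.ops = &foo_syscore_ops,
};

static int __init foo_init(void)
{
	register_syscore(&foo_syscore);
	return 0;
}
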
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index fa22f3faa163..664cf1eef494 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -747,7 +747,7 @@ static int __init pxa_gpio_dt_init(void)
device_initcall(pxa_gpio_dt_init);
#ifdef CONFIG_PM
-static int pxa_gpio_suspend(void)
+static int pxa_gpio_suspend(void *data)
{
struct pxa_gpio_chip *pchip = pxa_gpio_chip;
struct pxa_gpio_bank *c;
@@ -768,7 +768,7 @@ static int pxa_gpio_suspend(void)
return 0;
}
-static void pxa_gpio_resume(void)
+static void pxa_gpio_resume(void *data)
{
struct pxa_gpio_chip *pchip = pxa_gpio_chip;
struct pxa_gpio_bank *c;
@@ -792,14 +792,18 @@ static void pxa_gpio_resume(void)
#define pxa_gpio_resume NULL
#endif
-static struct syscore_ops pxa_gpio_syscore_ops = {
+static const struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
+static struct syscore pxa_gpio_syscore = {
+ .ops = &pxa_gpio_syscore_ops,
+};
+
static int __init pxa_gpio_sysinit(void)
{
- register_syscore_ops(&pxa_gpio_syscore_ops);
+ register_syscore(&pxa_gpio_syscore);
return 0;
}
postcore_initcall(pxa_gpio_sysinit);
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 7f6a62f5d1ee..1938ffa2f4f3 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -256,7 +256,7 @@ static void sa1100_gpio_handler(struct irq_desc *desc)
} while (mask);
}
-static int sa1100_gpio_suspend(void)
+static int sa1100_gpio_suspend(void *data)
{
struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip;
@@ -275,19 +275,23 @@ static int sa1100_gpio_suspend(void)
return 0;
}
-static void sa1100_gpio_resume(void)
+static void sa1100_gpio_resume(void *data)
{
sa1100_update_edge_regs(&sa1100_gpio_chip);
}
-static struct syscore_ops sa1100_gpio_syscore_ops = {
+static const struct syscore_ops sa1100_gpio_syscore_ops = {
.suspend = sa1100_gpio_suspend,
.resume = sa1100_gpio_resume,
};
+static struct syscore sa1100_gpio_syscore = {
+ .ops = &sa1100_gpio_syscore_ops,
+};
+
static int __init sa1100_gpio_init_devicefs(void)
{
- register_syscore_ops(&sa1100_gpio_syscore_ops);
+ register_syscore(&sa1100_gpio_syscore);
return 0;
}
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
index 91b7380f83ab..2246d8e104e0 100644
--- a/drivers/gpu/drm/nova/driver.rs
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -45,13 +45,13 @@ impl auxiliary::Driver for NovaDriver {
type IdInfo = ();
const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
- fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
let data = try_pin_init!(NovaData { adev: adev.into() });
let drm = drm::Device::<Self>::new(adev.as_ref(), data)?;
drm::Registration::new_foreign_owned(&drm, adev.as_ref(), 0)?;
- Ok(KBox::new(Self { drm }, GFP_KERNEL)?.into())
+ Ok(Self { drm })
}
}
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
index 90b9d2d0ec4a..a3b7bd36792c 100644
--- a/drivers/gpu/drm/nova/file.rs
+++ b/drivers/gpu/drm/nova/file.rs
@@ -28,7 +28,7 @@ impl File {
_file: &drm::File<File>,
) -> Result<u32> {
let adev = &dev.adev;
- let parent = adev.parent().ok_or(ENOENT)?;
+ let parent = adev.parent();
let pdev: &pci::Device = parent.try_into()?;
let value = match getparam.param as u32 {
diff --git a/drivers/gpu/drm/tyr/driver.rs b/drivers/gpu/drm/tyr/driver.rs
index d5625dd1e41c..0389c558c036 100644
--- a/drivers/gpu/drm/tyr/driver.rs
+++ b/drivers/gpu/drm/tyr/driver.rs
@@ -103,7 +103,7 @@ impl platform::Driver for TyrDriver {
fn probe(
pdev: &platform::Device<Core>,
_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
let core_clk = Clk::get(pdev.as_ref(), Some(c_str!("core")))?;
let stacks_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("stacks")))?;
let coregroup_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("coregroup")))?;
@@ -143,7 +143,7 @@ impl platform::Driver for TyrDriver {
let tdev: ARef<TyrDevice> = drm::Device::new(pdev.as_ref(), data)?;
drm::driver::Registration::new_foreign_owned(&tdev, pdev.as_ref(), 0)?;
- let driver = KBox::pin_init(try_pin_init!(TyrDriver { device: tdev }), GFP_KERNEL)?;
+ let driver = TyrDriver { device: tdev };
// We need this to be dev_info!() because dev_dbg!() does not work at
// all in Rust for now, and we need to see whether probe succeeded.
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index d91bbc50cde7..b8b0cc0f2d93 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -4,6 +4,7 @@ use kernel::{
auxiliary,
c_str,
device::Core,
+ devres::Devres,
dma::Device,
dma::DmaMask,
pci,
@@ -23,7 +24,8 @@ use crate::gpu::Gpu;
pub(crate) struct NovaCore {
#[pin]
pub(crate) gpu: Gpu,
- _reg: auxiliary::Registration,
+ #[pin]
+ _reg: Devres<auxiliary::Registration>,
}
const BAR0_SIZE: usize = SZ_16M;
@@ -67,41 +69,33 @@ impl pci::Driver for NovaCore {
type IdInfo = ();
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
- dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
-
- pdev.enable_device_mem()?;
- pdev.set_master();
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
+ pin_init::pin_init_scope(move || {
+ dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
- // SAFETY: No concurrent DMA allocations or mappings can be made because
- // the device is still being probed and therefore isn't being used by
- // other threads of execution.
- unsafe { pdev.dma_set_mask_and_coherent(DmaMask::new::<GPU_DMA_BITS>())? };
+ pdev.enable_device_mem()?;
+ pdev.set_master();
- let devres_bar = Arc::pin_init(
- pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
- GFP_KERNEL,
- )?;
+ // SAFETY: No concurrent DMA allocations or mappings can be made because
+ // the device is still being probed and therefore isn't being used by
+ // other threads of execution.
+ unsafe { pdev.dma_set_mask_and_coherent(DmaMask::new::<GPU_DMA_BITS>())? };
- // Used to provided a `&Bar0` to `Gpu::new` without tying it to the lifetime of
- // `devres_bar`.
- let bar_clone = Arc::clone(&devres_bar);
- let bar = bar_clone.access(pdev.as_ref())?;
+ let bar = Arc::pin_init(
+ pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
+ GFP_KERNEL,
+ )?;
- let this = KBox::pin_init(
- try_pin_init!(Self {
- gpu <- Gpu::new(pdev, devres_bar, bar),
- _reg: auxiliary::Registration::new(
+ Ok(try_pin_init!(Self {
+ gpu <- Gpu::new(pdev, bar.clone(), bar.access(pdev.as_ref())?),
+ _reg <- auxiliary::Registration::new(
pdev.as_ref(),
c_str!("nova-drm"),
0, // TODO[XARR]: Once it lands, use XArray; for now we don't use the ID.
crate::MODULE_NAME
- )?,
- }),
- GFP_KERNEL,
- )?;
-
- Ok(this)
+ ),
+ }))
+ })
}
fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) {
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 69591dc7bad2..67734dc73e16 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2801,7 +2801,7 @@ static void hv_crash_handler(struct pt_regs *regs)
hv_synic_disable_regs(cpu);
};
-static int hv_synic_suspend(void)
+static int hv_synic_suspend(void *data)
{
/*
* When we reach here, all the non-boot CPUs have been offlined.
@@ -2828,7 +2828,7 @@ static int hv_synic_suspend(void)
return 0;
}
-static void hv_synic_resume(void)
+static void hv_synic_resume(void *data)
{
hv_synic_enable_regs(0);
@@ -2840,11 +2840,15 @@ static void hv_synic_resume(void)
}
/* The callbacks run only on CPU0, with irqs_disabled. */
-static struct syscore_ops hv_synic_syscore_ops = {
+static const struct syscore_ops hv_synic_syscore_ops = {
.suspend = hv_synic_suspend,
.resume = hv_synic_resume,
};
+static struct syscore hv_synic_syscore = {
+ .ops = &hv_synic_syscore_ops,
+};
+
static int __init hv_acpi_init(void)
{
int ret;
@@ -2887,7 +2891,7 @@ static int __init hv_acpi_init(void)
hv_setup_kexec_handler(hv_kexec_handler);
hv_setup_crash_handler(hv_crash_handler);
- register_syscore_ops(&hv_synic_syscore_ops);
+ register_syscore(&hv_synic_syscore);
return 0;
@@ -2901,7 +2905,7 @@ static void __exit vmbus_exit(void)
{
int cpu;
- unregister_syscore_ops(&hv_synic_syscore_ops);
+ unregister_syscore(&hv_synic_syscore);
hv_remove_kexec_handler();
hv_remove_crash_handler();
diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c
index 6134613777b8..b97b46090d80 100644
--- a/drivers/iio/dac/ad3530r.c
+++ b/drivers/iio/dac/ad3530r.c
@@ -53,9 +53,6 @@
#define AD3530R_MAX_CHANNELS 8
#define AD3531R_MAX_CHANNELS 4
-/* Non-constant mask variant of FIELD_PREP() */
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
enum ad3530r_mode {
AD3530R_NORMAL_OP,
AD3530R_POWERDOWN_1K,
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 8a44a00bfd5e..1ad21b73e1b4 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -22,6 +22,7 @@
* the "wakeup" GPIO is not given, power management will be disabled.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
@@ -68,10 +69,6 @@
#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
#define MLX90614_CONST_FIR 0x7 /* Fixed value for FIR part of low pass filter */
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
struct mlx_chip_info {
/* EEPROM offsets with 16-bit data, MSB first */
/* emissivity correction coefficient */
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 78b1c44bd6b5..320733e7d8b4 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -107,6 +107,7 @@
/* Extended Feature 2 Bits */
+#define FEATURE_SEVSNPIO_SUP BIT_ULL(1)
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 4f4d4955269e..4b2953418977 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2261,6 +2261,9 @@ static void print_iommu_info(void)
if (check_feature(FEATURE_SNP))
pr_cont(" SNP");
+ if (check_feature2(FEATURE_SEVSNPIO_SUP))
+ pr_cont(" SEV-TIO");
+
pr_cont("\n");
}
@@ -3033,7 +3036,7 @@ static void disable_iommus(void)
* disable suspend until real resume implemented
*/
-static void amd_iommu_resume(void)
+static void amd_iommu_resume(void *data)
{
struct amd_iommu *iommu;
@@ -3047,7 +3050,7 @@ static void amd_iommu_resume(void)
amd_iommu_enable_interrupts();
}
-static int amd_iommu_suspend(void)
+static int amd_iommu_suspend(void *data)
{
/* disable IOMMUs to go out of the way for BIOS */
disable_iommus();
@@ -3055,11 +3058,15 @@ static int amd_iommu_suspend(void)
return 0;
}
-static struct syscore_ops amd_iommu_syscore_ops = {
+static const struct syscore_ops amd_iommu_syscore_ops = {
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
};
+static struct syscore amd_iommu_syscore = {
+ .ops = &amd_iommu_syscore_ops,
+};
+
static void __init free_iommu_resources(void)
{
free_iommu_all();
@@ -3404,7 +3411,7 @@ static int __init state_next(void)
init_state = IOMMU_ENABLED;
break;
case IOMMU_ENABLED:
- register_syscore_ops(&amd_iommu_syscore_ops);
+ register_syscore(&amd_iommu_syscore);
iommu_snp_enable();
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
@@ -3507,12 +3514,12 @@ int __init amd_iommu_enable(void)
void amd_iommu_disable(void)
{
- amd_iommu_suspend();
+ amd_iommu_suspend(NULL);
}
int amd_iommu_reenable(int mode)
{
- amd_iommu_resume();
+ amd_iommu_resume(NULL);
return 0;
}
@@ -4024,4 +4031,10 @@ int amd_iommu_snp_disable(void)
return 0;
}
EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
+
+bool amd_iommu_sev_tio_supported(void)
+{
+ return check_feature2(FEATURE_SEVSNPIO_SUP);
+}
+EXPORT_SYMBOL_GPL(amd_iommu_sev_tio_supported);
#endif
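
amd_iommu_sev_tio_supported() presumably gates SEV-TIO bring-up in the CCP/SEV driver (see the sev-dev-tsm.c addition in the diffstat). A hedged sketch of a caller; the declaration is assumed to land in <linux/amd-iommu.h> and the surrounding SEV code is not shown here:

#include <linux/amd-iommu.h>

/* Hedged sketch: gating TIO bring-up on the IOMMU capability. */
static bool sev_tio_usable_example(void)
{
	if (!amd_iommu_sev_tio_supported()) {
		pr_debug("SEV-TIO: IOMMU lacks SEVSNPIO support\n");
		return false;
	}
	return true;
}
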
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 4e888867e85c..134302fbcd92 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1825,7 +1825,7 @@ static void iommu_flush_all(void)
}
}
-static int iommu_suspend(void)
+static int iommu_suspend(void *data)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@@ -1852,7 +1852,7 @@ static int iommu_suspend(void)
return 0;
}
-static void iommu_resume(void)
+static void iommu_resume(void *data)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@@ -1883,14 +1883,18 @@ static void iommu_resume(void)
}
}
-static struct syscore_ops iommu_syscore_ops = {
+static const struct syscore_ops iommu_syscore_ops = {
.resume = iommu_resume,
.suspend = iommu_suspend,
};
+static struct syscore iommu_syscore = {
+ .ops = &iommu_syscore_ops,
+};
+
static void __init init_iommu_pm_ops(void)
{
- register_syscore_ops(&iommu_syscore_ops);
+ register_syscore(&iommu_syscore);
}
#else
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index e7dfcf0cda43..495848442b35 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -200,12 +200,13 @@ static void __init combiner_init(void __iomem *combiner_base,
/**
* combiner_suspend - save interrupt combiner state before suspend
+ * @data: syscore context
*
* Save the interrupt enable set register for all combiner groups since
* the state is lost when the system enters into a sleep state.
*
*/
-static int combiner_suspend(void)
+static int combiner_suspend(void *data)
{
int i;
@@ -218,12 +219,13 @@ static int combiner_suspend(void)
/**
* combiner_resume - restore interrupt combiner state after resume
+ * @data: syscore context
*
* Restore the interrupt enable set register for all combiner groups since
* the state is lost when the system enters into a sleep state on suspend.
*
*/
-static void combiner_resume(void)
+static void combiner_resume(void *data)
{
int i;
@@ -240,11 +242,15 @@ static void combiner_resume(void)
#define combiner_resume NULL
#endif
-static struct syscore_ops combiner_syscore_ops = {
+static const struct syscore_ops combiner_syscore_ops = {
.suspend = combiner_suspend,
.resume = combiner_resume,
};
+static struct syscore combiner_syscore = {
+ .ops = &combiner_syscore_ops,
+};
+
static int __init combiner_of_init(struct device_node *np,
struct device_node *parent)
{
@@ -264,7 +270,7 @@ static int __init combiner_of_init(struct device_node *np,
combiner_init(combiner_base, np);
- register_syscore_ops(&combiner_syscore_ops);
+ register_syscore(&combiner_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 795b3db4554a..3c70364e7cdd 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -411,12 +411,15 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
if (is_kernel_in_hyp_mode() &&
(read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
+ u64 val;
+
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_VGIC_MI));
if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
- read_sysreg_s(SYS_ICH_MISR_EL2))) {
- pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
+ (val = read_sysreg_s(SYS_ICH_MISR_EL2)))) {
+ pr_err_ratelimited("vGIC IRQ fired and not handled by KVM (MISR=%llx), disabling.\n",
+ val);
sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
}
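
The rework reads ICH_MISR_EL2 once into a local and logs that same value, so the diagnostic reports the state that actually triggered the shutdown; previously the condition re-read the register and the message carried no detail at all. The same read-once-then-log idiom in isolation, as a sketch:

/* Sketch: act on and log the same sampled value, never a second read. */
u64 misr = read_sysreg_s(SYS_ICH_MISR_EL2);

if (unlikely(misr)) {
	pr_err_ratelimited("vGIC MISR=%llx unhandled, disabling\n", misr);
	sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
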
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a44c49e985b7..a4d03a2d1569 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -726,7 +726,7 @@ static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
} while (1);
}
-static int mpic_suspend(void)
+static int mpic_suspend(void *data)
{
struct mpic *mpic = mpic_data;
@@ -735,7 +735,7 @@ static int mpic_suspend(void)
return 0;
}
-static void mpic_resume(void)
+static void mpic_resume(void *data)
{
struct mpic *mpic = mpic_data;
bool src0, src1;
@@ -788,11 +788,15 @@ static void mpic_resume(void)
mpic_ipi_resume(mpic);
}
-static struct syscore_ops mpic_syscore_ops = {
+static const struct syscore_ops mpic_syscore_ops = {
.suspend = mpic_suspend,
.resume = mpic_resume,
};
+static struct syscore mpic_syscore = {
+ .ops = &mpic_syscore_ops,
+};
+
static int __init mpic_map_region(struct device_node *np, int index,
void __iomem **base, phys_addr_t *phys_base)
{
@@ -905,7 +909,7 @@ static int __init mpic_of_init(struct device_node *node, struct device_node *par
mpic_handle_cascade_irq, mpic);
}
- register_syscore_ops(&mpic_syscore_ops);
+ register_syscore(&mpic_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index ea1446c0a09c..45c4824be92f 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -285,7 +285,7 @@ static int bcm7038_l1_init_one(struct device_node *dn, unsigned int idx,
static LIST_HEAD(bcm7038_l1_intcs_list);
static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
-static int bcm7038_l1_suspend(void)
+static int bcm7038_l1_suspend(void *data)
{
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
@@ -311,7 +311,7 @@ static int bcm7038_l1_suspend(void)
return 0;
}
-static void bcm7038_l1_resume(void)
+static void bcm7038_l1_resume(void *data)
{
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
@@ -332,11 +332,15 @@ static void bcm7038_l1_resume(void)
}
}
-static struct syscore_ops bcm7038_l1_syscore_ops = {
+static const struct syscore_ops bcm7038_l1_syscore_ops = {
.suspend = bcm7038_l1_suspend,
.resume = bcm7038_l1_resume,
};
+static struct syscore bcm7038_l1_syscore = {
+ .ops = &bcm7038_l1_syscore_ops,
+};
+
static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
{
struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
@@ -424,7 +428,7 @@ static int bcm7038_l1_probe(struct platform_device *pdev, struct device_node *pa
raw_spin_unlock(&bcm7038_l1_intcs_lock);
if (list_is_singular(&bcm7038_l1_intcs_list))
- register_syscore_ops(&bcm7038_l1_syscore_ops);
+ register_syscore(&bcm7038_l1_syscore);
#endif
pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 467cb78435a9..ada585bfa451 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4992,7 +4992,7 @@ static void its_enable_quirks(struct its_node *its)
its_quirks, its);
}
-static int its_save_disable(void)
+static int its_save_disable(void *data)
{
struct its_node *its;
int err = 0;
@@ -5028,7 +5028,7 @@ err:
return err;
}
-static void its_restore_enable(void)
+static void its_restore_enable(void *data)
{
struct its_node *its;
int ret;
@@ -5088,11 +5088,15 @@ static void its_restore_enable(void)
raw_spin_unlock(&its_lock);
}
-static struct syscore_ops its_syscore_ops = {
+static const struct syscore_ops its_syscore_ops = {
.suspend = its_save_disable,
.resume = its_restore_enable,
};
+static struct syscore its_syscore = {
+ .ops = &its_syscore_ops,
+};
+
static void __init __iomem *its_map_one(struct resource *res, int *err)
{
void __iomem *its_base;
@@ -5864,7 +5868,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
}
- register_syscore_ops(&its_syscore_ops);
+ register_syscore(&its_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1269ab8eb726..ec70c84e9f91 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1459,6 +1459,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (ret)
return;
+ gic_v2_kvm_info.gicc_base = gic_data[0].cpu_base.common_base;
+
if (static_branch_likely(&supports_deactivate_key))
vgic_set_kvm_info(&gic_v2_kvm_info);
}
@@ -1620,6 +1622,7 @@ static void __init gic_acpi_setup_kvm_info(void)
return;
gic_v2_kvm_info.maint_irq = irq;
+ gic_v2_kvm_info.gicc_base = gic_data[0].cpu_base.common_base;
vgic_set_kvm_info(&gic_v2_kvm_info);
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 91b2f587119c..cca77f9948a3 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -202,13 +202,13 @@ spurious_8259A_irq:
}
}
-static void i8259A_resume(void)
+static void i8259A_resume(void *data)
{
if (i8259A_auto_eoi >= 0)
init_8259A(i8259A_auto_eoi);
}
-static void i8259A_shutdown(void)
+static void i8259A_shutdown(void *data)
{
/* Put the i8259A into a quiescent state that
* the kernel initialization code can get it
@@ -220,11 +220,15 @@ static void i8259A_shutdown(void)
}
}
-static struct syscore_ops i8259_syscore_ops = {
+static const struct syscore_ops i8259_syscore_ops = {
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
};
+static struct syscore i8259_syscore = {
+ .ops = &i8259_syscore_ops,
+};
+
static void init_8259A(int auto_eoi)
{
unsigned long flags;
@@ -320,7 +324,7 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to register cascade interrupt\n");
- register_syscore_ops(&i8259_syscore_ops);
+ register_syscore(&i8259_syscore);
return domain;
}
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index b91f5c14b405..04f7ba0657be 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -33,7 +33,7 @@ static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
return cd->gpc_base + cd->cpu2wakeup + i * 4;
}
-static int gpcv2_wakeup_source_save(void)
+static int gpcv2_wakeup_source_save(void *data)
{
struct gpcv2_irqchip_data *cd;
void __iomem *reg;
@@ -52,7 +52,7 @@ static int gpcv2_wakeup_source_save(void)
return 0;
}
-static void gpcv2_wakeup_source_restore(void)
+static void gpcv2_wakeup_source_restore(void *data)
{
struct gpcv2_irqchip_data *cd;
int i;
@@ -65,9 +65,13 @@ static void gpcv2_wakeup_source_restore(void)
writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i));
}
-static struct syscore_ops imx_gpcv2_syscore_ops = {
- .suspend = gpcv2_wakeup_source_save,
- .resume = gpcv2_wakeup_source_restore,
+static const struct syscore_ops gpcv2_syscore_ops = {
+ .suspend = gpcv2_wakeup_source_save,
+ .resume = gpcv2_wakeup_source_restore,
+};
+
+static struct syscore gpcv2_syscore = {
+ .ops = &gpcv2_syscore_ops,
};
static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -276,7 +280,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);
imx_gpcv2_instance = cd;
- register_syscore_ops(&imx_gpcv2_syscore_ops);
+ register_syscore(&gpcv2_syscore);
/*
* Clear the OF_POPULATED flag set in of_irq_init so that
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 39e5a72ccd3c..ad2105685b48 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -407,21 +407,25 @@ static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group
return NULL;
}
-static int eiointc_suspend(void)
+static int eiointc_suspend(void *data)
{
return 0;
}
-static void eiointc_resume(void)
+static void eiointc_resume(void *data)
{
eiointc_router_init(0);
}
-static struct syscore_ops eiointc_syscore_ops = {
+static const struct syscore_ops eiointc_syscore_ops = {
.suspend = eiointc_suspend,
.resume = eiointc_resume,
};
+static struct syscore eiointc_syscore = {
+ .ops = &eiointc_syscore_ops,
+};
+
static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
@@ -540,7 +544,7 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
eiointc_router_init(0);
if (nr_pics == 1) {
- register_syscore_ops(&eiointc_syscore_ops);
+ register_syscore(&eiointc_syscore);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
"irqchip/loongarch/eiointc:starting",
eiointc_router_init, NULL);
diff --git a/drivers/irqchip/irq-loongson-htpic.c b/drivers/irqchip/irq-loongson-htpic.c
index f4abdf156de7..1c691c4be989 100644
--- a/drivers/irqchip/irq-loongson-htpic.c
+++ b/drivers/irqchip/irq-loongson-htpic.c
@@ -71,15 +71,19 @@ static void htpic_reg_init(void)
writel(0xffff, htpic->base + HTINT_EN_OFF);
}
-static void htpic_resume(void)
+static void htpic_resume(void *data)
{
htpic_reg_init();
}
-struct syscore_ops htpic_syscore_ops = {
+static const struct syscore_ops htpic_syscore_ops = {
.resume = htpic_resume,
};
+static struct syscore htpic_syscore = {
+ .ops = &htpic_syscore_ops,
+};
+
static int __init htpic_of_init(struct device_node *node, struct device_node *parent)
{
unsigned int parent_irq[4];
@@ -130,7 +134,7 @@ static int __init htpic_of_init(struct device_node *node, struct device_node *pa
htpic_irq_dispatch, htpic);
}
- register_syscore_ops(&htpic_syscore_ops);
+ register_syscore(&htpic_syscore);
return 0;
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index d8558eb35044..d2be8e954e92 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -159,7 +159,7 @@ static void htvec_reset(struct htvec *priv)
}
}
-static int htvec_suspend(void)
+static int htvec_suspend(void *data)
{
int i;
@@ -169,7 +169,7 @@ static int htvec_suspend(void)
return 0;
}
-static void htvec_resume(void)
+static void htvec_resume(void *data)
{
int i;
@@ -177,11 +177,15 @@ static void htvec_resume(void)
writel(htvec_priv->saved_vec_en[i], htvec_priv->base + HTVEC_EN_OFF + 4 * i);
}
-static struct syscore_ops htvec_syscore_ops = {
+static const struct syscore_ops htvec_syscore_ops = {
.suspend = htvec_suspend,
.resume = htvec_resume,
};
+static struct syscore htvec_syscore = {
+ .ops = &htvec_syscore_ops,
+};
+
static int htvec_init(phys_addr_t addr, unsigned long size,
int num_parents, int parent_irq[], struct fwnode_handle *domain_handle)
{
@@ -214,7 +218,7 @@ static int htvec_init(phys_addr_t addr, unsigned long size,
htvec_priv = priv;
- register_syscore_ops(&htvec_syscore_ops);
+ register_syscore(&htvec_syscore);
return 0;
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
index 912bf50a5c7c..3a125f3e4287 100644
--- a/drivers/irqchip/irq-loongson-pch-lpc.c
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -151,7 +151,7 @@ static int pch_lpc_disabled(struct pch_lpc *priv)
(readl(priv->base + LPC_INT_STS) == 0xffffffff);
}
-static int pch_lpc_suspend(void)
+static int pch_lpc_suspend(void *data)
{
pch_lpc_priv->saved_reg_ctl = readl(pch_lpc_priv->base + LPC_INT_CTL);
pch_lpc_priv->saved_reg_ena = readl(pch_lpc_priv->base + LPC_INT_ENA);
@@ -159,18 +159,22 @@ static int pch_lpc_suspend(void)
return 0;
}
-static void pch_lpc_resume(void)
+static void pch_lpc_resume(void *data)
{
writel(pch_lpc_priv->saved_reg_ctl, pch_lpc_priv->base + LPC_INT_CTL);
writel(pch_lpc_priv->saved_reg_ena, pch_lpc_priv->base + LPC_INT_ENA);
writel(pch_lpc_priv->saved_reg_pol, pch_lpc_priv->base + LPC_INT_POL);
}
-static struct syscore_ops pch_lpc_syscore_ops = {
+static const struct syscore_ops pch_lpc_syscore_ops = {
.suspend = pch_lpc_suspend,
.resume = pch_lpc_resume,
};
+static struct syscore pch_lpc_syscore = {
+ .ops = &pch_lpc_syscore_ops,
+};
+
int __init pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc)
{
@@ -222,7 +226,7 @@ int __init pch_lpc_acpi_init(struct irq_domain *parent,
pch_lpc_priv = priv;
pch_lpc_handle = irq_handle;
- register_syscore_ops(&pch_lpc_syscore_ops);
+ register_syscore(&pch_lpc_syscore);
return 0;
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 62e6bf3a0611..c6b369a974a7 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -278,7 +278,7 @@ static void pch_pic_reset(struct pch_pic *priv)
}
}
-static int pch_pic_suspend(void)
+static int pch_pic_suspend(void *data)
{
int i, j;
@@ -296,7 +296,7 @@ static int pch_pic_suspend(void)
return 0;
}
-static void pch_pic_resume(void)
+static void pch_pic_resume(void *data)
{
int i, j;
@@ -313,11 +313,15 @@ static void pch_pic_resume(void)
}
}
-static struct syscore_ops pch_pic_syscore_ops = {
+static const struct syscore_ops pch_pic_syscore_ops = {
.suspend = pch_pic_suspend,
.resume = pch_pic_resume,
};
+static struct syscore pch_pic_syscore = {
+ .ops = &pch_pic_syscore_ops,
+};
+
static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
struct irq_domain *parent_domain, struct fwnode_handle *domain_handle,
u32 gsi_base)
@@ -356,7 +360,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
pch_pic_priv[nr_pics++] = priv;
if (nr_pics == 1)
- register_syscore_ops(&pch_pic_syscore_ops);
+ register_syscore(&pch_pic_syscore);
return 0;
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index b513a899c085..2474fa467a05 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -109,7 +109,7 @@ static int mchp_eic_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static int mchp_eic_irq_suspend(void)
+static int mchp_eic_irq_suspend(void *data)
{
unsigned int hwirq;
@@ -123,7 +123,7 @@ static int mchp_eic_irq_suspend(void)
return 0;
}
-static void mchp_eic_irq_resume(void)
+static void mchp_eic_irq_resume(void *data)
{
unsigned int hwirq;
@@ -135,11 +135,15 @@ static void mchp_eic_irq_resume(void)
MCHP_EIC_SCFG(hwirq));
}
-static struct syscore_ops mchp_eic_syscore_ops = {
+static const struct syscore_ops mchp_eic_syscore_ops = {
.suspend = mchp_eic_irq_suspend,
.resume = mchp_eic_irq_resume,
};
+static struct syscore mchp_eic_syscore = {
+ .ops = &mchp_eic_syscore_ops,
+};
+
static struct irq_chip mchp_eic_chip = {
.name = "eic",
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
@@ -258,7 +262,7 @@ static int mchp_eic_probe(struct platform_device *pdev, struct device_node *pare
goto clk_unprepare;
}
- register_syscore_ops(&mchp_eic_syscore_ops);
+ register_syscore(&mchp_eic_syscore);
pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
index 9643cc3a77d7..7f760f555a76 100644
--- a/drivers/irqchip/irq-mst-intc.c
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -143,7 +143,7 @@ static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd)
writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4);
}
-static void mst_irq_resume(void)
+static void mst_irq_resume(void *data)
{
struct mst_intc_chip_data *cd;
@@ -151,7 +151,7 @@ static void mst_irq_resume(void)
mst_intc_polarity_restore(cd);
}
-static int mst_irq_suspend(void)
+static int mst_irq_suspend(void *data)
{
struct mst_intc_chip_data *cd;
@@ -160,14 +160,18 @@ static int mst_irq_suspend(void)
return 0;
}
-static struct syscore_ops mst_irq_syscore_ops = {
+static const struct syscore_ops mst_irq_syscore_ops = {
.suspend = mst_irq_suspend,
.resume = mst_irq_resume,
};
+static struct syscore mst_irq_syscore = {
+ .ops = &mst_irq_syscore_ops,
+};
+
static int __init mst_irq_pm_init(void)
{
- register_syscore_ops(&mst_irq_syscore_ops);
+ register_syscore(&mst_irq_syscore);
return 0;
}
late_initcall(mst_irq_pm_init);
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index de481ba340f8..9571f622774e 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -199,7 +199,7 @@ static const struct irq_domain_ops cirq_domain_ops = {
};
#ifdef CONFIG_PM_SLEEP
-static int mtk_cirq_suspend(void)
+static int mtk_cirq_suspend(void *data)
{
void __iomem *reg;
u32 value, mask;
@@ -257,7 +257,7 @@ static int mtk_cirq_suspend(void)
return 0;
}
-static void mtk_cirq_resume(void)
+static void mtk_cirq_resume(void *data)
{
void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
u32 value;
@@ -272,14 +272,18 @@ static void mtk_cirq_resume(void)
writel_relaxed(value, reg);
}
-static struct syscore_ops mtk_cirq_syscore_ops = {
+static const struct syscore_ops mtk_cirq_syscore_ops = {
.suspend = mtk_cirq_suspend,
.resume = mtk_cirq_resume,
};
+static struct syscore mtk_cirq_syscore = {
+ .ops = &mtk_cirq_syscore_ops,
+};
+
static void mtk_cirq_syscore_init(void)
{
- register_syscore_ops(&mtk_cirq_syscore_ops);
+ register_syscore(&mtk_cirq_syscore);
}
#else
static inline void mtk_cirq_syscore_init(void) {}
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 1bf19deb02c4..e73d426cea6d 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -398,7 +398,7 @@ static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static int rzg2l_irqc_irq_suspend(void)
+static int rzg2l_irqc_irq_suspend(void *data)
{
struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
void __iomem *base = rzg2l_irqc_data->base;
@@ -410,7 +410,7 @@ static int rzg2l_irqc_irq_suspend(void)
return 0;
}
-static void rzg2l_irqc_irq_resume(void)
+static void rzg2l_irqc_irq_resume(void *data)
{
struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
void __iomem *base = rzg2l_irqc_data->base;
@@ -425,11 +425,15 @@ static void rzg2l_irqc_irq_resume(void)
writel_relaxed(cache->iitsr, base + IITSR);
}
-static struct syscore_ops rzg2l_irqc_syscore_ops = {
+static const struct syscore_ops rzg2l_irqc_syscore_ops = {
.suspend = rzg2l_irqc_irq_suspend,
.resume = rzg2l_irqc_irq_resume,
};
+static struct syscore rzg2l_irqc_syscore = {
+ .ops = &rzg2l_irqc_syscore_ops,
+};
+
static const struct irq_chip rzg2l_irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
@@ -577,7 +581,7 @@ static int rzg2l_irqc_common_probe(struct platform_device *pdev, struct device_n
return -ENOMEM;
}
- register_syscore_ops(&rzg2l_irqc_syscore_ops);
+ register_syscore(&rzg2l_irqc_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index d8d4dff16276..e5f24c5f3f41 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -85,7 +85,7 @@ static struct sa1100irq_state {
unsigned int iccr;
} sa1100irq_state;
-static int sa1100irq_suspend(void)
+static int sa1100irq_suspend(void *data)
{
struct sa1100irq_state *st = &sa1100irq_state;
@@ -102,7 +102,7 @@ static int sa1100irq_suspend(void)
return 0;
}
-static void sa1100irq_resume(void)
+static void sa1100irq_resume(void *data)
{
struct sa1100irq_state *st = &sa1100irq_state;
@@ -114,14 +114,18 @@ static void sa1100irq_resume(void)
}
}
-static struct syscore_ops sa1100irq_syscore_ops = {
+static const struct syscore_ops sa1100irq_syscore_ops = {
.suspend = sa1100irq_suspend,
.resume = sa1100irq_resume,
};
+static struct syscore sa1100irq_syscore = {
+ .ops = &sa1100irq_syscore_ops,
+};
+
static int __init sa1100irq_init_devicefs(void)
{
- register_syscore_ops(&sa1100irq_syscore_ops);
+ register_syscore(&sa1100irq_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index c5db7d6e3f7c..210a57959637 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -255,7 +255,7 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
return IRQ_SET_MASK_OK;
}
-static int plic_irq_suspend(void)
+static int plic_irq_suspend(void *data)
{
struct plic_priv *priv;
@@ -270,7 +270,7 @@ static int plic_irq_suspend(void)
return 0;
}
-static void plic_irq_resume(void)
+static void plic_irq_resume(void *data)
{
unsigned int i, index, cpu;
unsigned long flags;
@@ -301,11 +301,15 @@ static void plic_irq_resume(void)
}
}
-static struct syscore_ops plic_irq_syscore_ops = {
+static const struct syscore_ops plic_irq_syscore_ops = {
.suspend = plic_irq_suspend,
.resume = plic_irq_resume,
};
+static struct syscore plic_irq_syscore = {
+ .ops = &plic_irq_syscore_ops,
+};
+
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -769,7 +773,7 @@ done:
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
- register_syscore_ops(&plic_irq_syscore_ops);
+ register_syscore(&plic_irq_syscore);
plic_global_setup_done = true;
}
}
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index 37d4b29763bc..23251831c06e 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -268,7 +268,7 @@ static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
.free = irq_domain_free_irqs_common,
};
-static int sun6i_r_intc_suspend(void)
+static int sun6i_r_intc_suspend(void *data)
{
u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
int i;
@@ -284,7 +284,7 @@ static int sun6i_r_intc_suspend(void)
return 0;
}
-static void sun6i_r_intc_resume(void)
+static void sun6i_r_intc_resume(void *data)
{
int i;
@@ -294,17 +294,21 @@ static void sun6i_r_intc_resume(void)
writel_relaxed(0, base + SUN6I_IRQ_ENABLE(i));
}
-static void sun6i_r_intc_shutdown(void)
+static void sun6i_r_intc_shutdown(void *data)
{
- sun6i_r_intc_suspend();
+ sun6i_r_intc_suspend(data);
}
-static struct syscore_ops sun6i_r_intc_syscore_ops = {
+static const struct syscore_ops sun6i_r_intc_syscore_ops = {
.suspend = sun6i_r_intc_suspend,
.resume = sun6i_r_intc_resume,
.shutdown = sun6i_r_intc_shutdown,
};
+static struct syscore sun6i_r_intc_syscore = {
+ .ops = &sun6i_r_intc_syscore_ops,
+};
+
static int __init sun6i_r_intc_init(struct device_node *node,
struct device_node *parent,
const struct sun6i_r_intc_variant *v)
@@ -346,10 +350,10 @@ static int __init sun6i_r_intc_init(struct device_node *node,
return -ENOMEM;
}
- register_syscore_ops(&sun6i_r_intc_syscore_ops);
+ register_syscore(&sun6i_r_intc_syscore);
sun6i_r_intc_ack_nmi();
- sun6i_r_intc_resume();
+ sun6i_r_intc_resume(NULL);
return 0;
}
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 66cbb9f77ff3..b6382cf6359a 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -132,7 +132,7 @@ static int tegra_set_wake(struct irq_data *d, unsigned int enable)
return 0;
}
-static int tegra_ictlr_suspend(void)
+static int tegra_ictlr_suspend(void *data)
{
unsigned long flags;
unsigned int i;
@@ -161,7 +161,7 @@ static int tegra_ictlr_suspend(void)
return 0;
}
-static void tegra_ictlr_resume(void)
+static void tegra_ictlr_resume(void *data)
{
unsigned long flags;
unsigned int i;
@@ -184,14 +184,18 @@ static void tegra_ictlr_resume(void)
local_irq_restore(flags);
}
-static struct syscore_ops tegra_ictlr_syscore_ops = {
+static const struct syscore_ops tegra_ictlr_syscore_ops = {
.suspend = tegra_ictlr_suspend,
.resume = tegra_ictlr_resume,
};
+static struct syscore tegra_ictlr_syscore = {
+ .ops = &tegra_ictlr_syscore_ops,
+};
+
static void tegra_ictlr_syscore_init(void)
{
- register_syscore_ops(&tegra_ictlr_syscore_ops);
+ register_syscore(&tegra_ictlr_syscore);
}
#else
#define tegra_set_wake NULL
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 2bcdf216a000..e38104c5064e 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -120,7 +120,7 @@ static void resume_one_vic(struct vic_device *vic)
writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
}
-static void vic_resume(void)
+static void vic_resume(void *data)
{
int id;
@@ -146,7 +146,7 @@ static void suspend_one_vic(struct vic_device *vic)
writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
}
-static int vic_suspend(void)
+static int vic_suspend(void *data)
{
int id;
@@ -156,11 +156,15 @@ static int vic_suspend(void)
return 0;
}
-static struct syscore_ops vic_syscore_ops = {
+static const struct syscore_ops vic_syscore_ops = {
.suspend = vic_suspend,
.resume = vic_resume,
};
+static struct syscore vic_syscore = {
+ .ops = &vic_syscore_ops,
+};
+
/**
* vic_pm_init - initcall to register VIC pm
*
@@ -171,7 +175,7 @@ static struct syscore_ops vic_syscore_ops = {
static int __init vic_pm_init(void)
{
if (vic_id > 0)
- register_syscore_ops(&vic_syscore_ops);
+ register_syscore(&vic_syscore);
return 0;
}
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 05848a2fecff..679323c2ccda 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -94,28 +94,32 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
}
EXPORT_SYMBOL(ledtrig_cpu);
-static int ledtrig_cpu_syscore_suspend(void)
+static int ledtrig_cpu_syscore_suspend(void *data)
{
ledtrig_cpu(CPU_LED_STOP);
return 0;
}
-static void ledtrig_cpu_syscore_resume(void)
+static void ledtrig_cpu_syscore_resume(void *data)
{
ledtrig_cpu(CPU_LED_START);
}
-static void ledtrig_cpu_syscore_shutdown(void)
+static void ledtrig_cpu_syscore_shutdown(void *data)
{
ledtrig_cpu(CPU_LED_HALTED);
}
-static struct syscore_ops ledtrig_cpu_syscore_ops = {
+static const struct syscore_ops ledtrig_cpu_syscore_ops = {
.shutdown = ledtrig_cpu_syscore_shutdown,
.suspend = ledtrig_cpu_syscore_suspend,
.resume = ledtrig_cpu_syscore_resume,
};
+static struct syscore ledtrig_cpu_syscore = {
+ .ops = &ledtrig_cpu_syscore_ops,
+};
+
static int ledtrig_online_cpu(unsigned int cpu)
{
ledtrig_cpu(CPU_LED_START);
@@ -157,7 +161,7 @@ static int __init ledtrig_cpu_init(void)
led_trigger_register_simple(trig->name, &trig->_trig);
}
- register_syscore_ops(&ledtrig_cpu_syscore_ops);
+ register_syscore(&ledtrig_cpu_syscore);
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting",
ledtrig_online_cpu, ledtrig_prepare_down_cpu);
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 369d72f59b3c..06fd910b3fd1 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -187,13 +187,14 @@ static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
- int old_val = *valp;
+ int old_val;
int rc;
rc = mutex_lock_killable(&mac_hid_emumouse_mutex);
if (rc)
return rc;
+ old_val = *valp;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (rc == 0 && write && *valp != old_val) {
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 26bd9ed5e664..d91825bb0a5c 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -11,6 +11,8 @@
#include <asm/ptrace.h>
#include <linux/adb.h>
#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/of.h>
#include <linux/pmu.h>
#include <asm/backlight.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index b0f09c70f1ff..5fe47e784d43 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2600,7 +2600,7 @@ void pmu_blink(int n)
#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
int pmu_sys_suspended;
-static int pmu_syscore_suspend(void)
+static int pmu_syscore_suspend(void *data)
{
/* Suspend PMU event interrupts */
pmu_suspend();
@@ -2614,7 +2614,7 @@ static int pmu_syscore_suspend(void)
return 0;
}
-static void pmu_syscore_resume(void)
+static void pmu_syscore_resume(void *data)
{
struct adb_request req;
@@ -2634,14 +2634,18 @@ static void pmu_syscore_resume(void)
pmu_sys_suspended = 0;
}
-static struct syscore_ops pmu_syscore_ops = {
+static const struct syscore_ops pmu_syscore_ops = {
.suspend = pmu_syscore_suspend,
.resume = pmu_syscore_resume,
};
+static struct syscore pmu_syscore = {
+ .ops = &pmu_syscore_ops,
+};
+
static int pmu_syscore_register(void)
{
- register_syscore_ops(&pmu_syscore_ops);
+ register_syscore(&pmu_syscore);
return 0;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index cdd2ac198f2c..3932a449a1b1 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -10,7 +10,7 @@
/* driver definitions */
-#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
+#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>"
#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.2"
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 0c510035805b..05c18b6de5c6 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -70,12 +70,12 @@
#include "ts2020.h"
-#define LME2510_C_S7395 "dvb-usb-lme2510c-s7395.fw";
-#define LME2510_C_LG "dvb-usb-lme2510c-lg.fw";
-#define LME2510_C_S0194 "dvb-usb-lme2510c-s0194.fw";
-#define LME2510_C_RS2000 "dvb-usb-lme2510c-rs2000.fw";
-#define LME2510_LG "dvb-usb-lme2510-lg.fw";
-#define LME2510_S0194 "dvb-usb-lme2510-s0194.fw";
+#define LME2510_C_S7395 "dvb-usb-lme2510c-s7395.fw"
+#define LME2510_C_LG "dvb-usb-lme2510c-lg.fw"
+#define LME2510_C_S0194 "dvb-usb-lme2510c-s0194.fw"
+#define LME2510_C_RS2000 "dvb-usb-lme2510c-rs2000.fw"
+#define LME2510_LG "dvb-usb-lme2510-lg.fw"
+#define LME2510_S0194 "dvb-usb-lme2510-s0194.fw"
/* debug */
static int dvb_usb_lme2510_debug;
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 4a417b693080..58ccc1c02e90 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -67,6 +67,8 @@ struct rpcif_priv {
void __iomem *dirmap;
struct regmap *regmap;
struct reset_control *rstc;
+ struct clk *spi_clk;
+ struct clk *spix2_clk;
struct platform_device *vdev;
size_t size;
const struct rpcif_info *info;
@@ -1024,19 +1026,15 @@ static int rpcif_probe(struct platform_device *pdev)
* flash write failure. So, enable these clocks during probe() and
* disable it in remove().
*/
- if (rpc->info->type == XSPI_RZ_G3E) {
- struct clk *spi_clk;
-
- spi_clk = devm_clk_get_enabled(dev, "spix2");
- if (IS_ERR(spi_clk))
- return dev_err_probe(dev, PTR_ERR(spi_clk),
- "cannot get enabled spix2 clk\n");
-
- spi_clk = devm_clk_get_enabled(dev, "spi");
- if (IS_ERR(spi_clk))
- return dev_err_probe(dev, PTR_ERR(spi_clk),
- "cannot get enabled spi clk\n");
- }
+ rpc->spix2_clk = devm_clk_get_optional_enabled(dev, "spix2");
+ if (IS_ERR(rpc->spix2_clk))
+ return dev_err_probe(dev, PTR_ERR(rpc->spix2_clk),
+ "cannot get enabled spix2 clk\n");
+
+ rpc->spi_clk = devm_clk_get_optional_enabled(dev, "spi");
+ if (IS_ERR(rpc->spi_clk))
+ return dev_err_probe(dev, PTR_ERR(rpc->spi_clk),
+ "cannot get enabled spi clk\n");
vdev = platform_device_alloc(name, pdev->id);
if (!vdev)
@@ -1063,6 +1061,37 @@ static void rpcif_remove(struct platform_device *pdev)
platform_device_unregister(rpc->vdev);
}
+static int rpcif_suspend(struct device *dev)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rpc->spi_clk);
+ clk_disable_unprepare(rpc->spix2_clk);
+
+ return 0;
+}
+
+static int rpcif_resume(struct device *dev)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(rpc->spix2_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable spix2 clock: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rpc->spi_clk);
+ if (ret) {
+ clk_disable_unprepare(rpc->spix2_clk);
+ dev_err(dev, "failed to enable spi clock: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct rpcif_impl rpcif_impl = {
.hw_init = rpcif_hw_init_impl,
.prepare = rpcif_prepare_impl,
@@ -1125,12 +1154,15 @@ static const struct of_device_id rpcif_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rpcif_of_match);
+static DEFINE_SIMPLE_DEV_PM_OPS(rpcif_pm_ops, rpcif_suspend, rpcif_resume);
+
static struct platform_driver rpcif_driver = {
.probe = rpcif_probe,
.remove = rpcif_remove,
.driver = {
.name = "rpc-if",
.of_match_table = rpcif_of_match,
+ .pm = pm_sleep_ptr(&rpcif_pm_ops),
},
};
module_platform_driver(rpcif_driver);
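
Two idioms carry the rpc-if hunks. First, devm_clk_get_optional_enabled() returns NULL rather than an error when the clock is absent from the device tree, and the clk API treats a NULL clock as a no-op, so the XSPI_RZ_G3E type check can be dropped without affecting variants that lack these clocks; this is also why rpcif_suspend()/rpcif_resume() need no NULL guards. Second, DEFINE_SIMPLE_DEV_PM_OPS() paired with pm_sleep_ptr() lets the new callbacks compile away entirely when CONFIG_PM_SLEEP is disabled. A sketch of that pair, with hypothetical foo_* names:

	/* Sleep-only dev_pm_ops that vanish on !CONFIG_PM_SLEEP builds. */
	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm = pm_sleep_ptr(&foo_pm_ops), /* NULL when CONFIG_PM_SLEEP=n */
		},
	};

Note also that rpcif_resume() enables the clocks in probe order (spix2, then spi) and rpcif_suspend() disables them in reverse.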
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 03f1daa2d132..9978ff911c47 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -571,8 +571,8 @@ static void emc_seq_wait_clkchange(struct tegra_emc *emc)
dev_err(emc->dev, "clock change timed out\n");
}
-static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
- unsigned long rate)
+static struct emc_timing *tegra124_emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
{
struct emc_timing *timing = NULL;
unsigned int i;
@@ -592,10 +592,10 @@ static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
return timing;
}
-static int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
- unsigned long rate)
+static int tegra124_emc_prepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
{
- struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ struct emc_timing *timing = tegra124_emc_find_timing(emc, rate);
struct emc_timing *last = &emc->last_timing;
enum emc_dll_change dll_change;
unsigned int pre_wait = 0;
@@ -820,10 +820,10 @@ static int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
return 0;
}
-static void tegra_emc_complete_timing_change(struct tegra_emc *emc,
- unsigned long rate)
+static void tegra124_emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
{
- struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ struct emc_timing *timing = tegra124_emc_find_timing(emc, rate);
struct emc_timing *last = &emc->last_timing;
u32 val;
@@ -896,7 +896,7 @@ static void emc_read_current_timing(struct tegra_emc *emc,
timing->emc_mode_reset = 0;
}
-static int emc_init(struct tegra_emc *emc)
+static void emc_init(struct tegra_emc *emc)
{
emc->dram_type = readl(emc->regs + EMC_FBIO_CFG5);
@@ -913,8 +913,6 @@ static int emc_init(struct tegra_emc *emc)
emc->dram_num = tegra_mc_get_emem_device_count(emc->mc);
emc_read_current_timing(emc, &emc->last_timing);
-
- return 0;
}
static int load_one_timing_from_dt(struct tegra_emc *emc,
@@ -988,8 +986,8 @@ static int cmp_timings(const void *_a, const void *_b)
return 1;
}
-static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
- struct device_node *node)
+static int tegra124_emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
{
int child_count = of_get_child_count(node);
struct emc_timing *timing;
@@ -1017,15 +1015,15 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
return 0;
}
-static const struct of_device_id tegra_emc_of_match[] = {
+static const struct of_device_id tegra124_emc_of_match[] = {
{ .compatible = "nvidia,tegra124-emc" },
{ .compatible = "nvidia,tegra132-emc" },
{}
};
-MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+MODULE_DEVICE_TABLE(of, tegra124_emc_of_match);
static struct device_node *
-tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
+tegra124_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
{
struct device_node *np;
int err;
@@ -1043,7 +1041,7 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
return NULL;
}
-static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+static void tegra124_emc_rate_requests_init(struct tegra_emc *emc)
{
unsigned int i;
@@ -1145,7 +1143,7 @@ static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
* valid range.
*/
-static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+static bool tegra124_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
{
unsigned int i;
@@ -1156,8 +1154,8 @@ static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
return false;
}
-static int tegra_emc_debug_available_rates_show(struct seq_file *s,
- void *data)
+static int tegra124_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
{
struct tegra_emc *emc = s->private;
const char *prefix = "";
@@ -1173,9 +1171,9 @@ static int tegra_emc_debug_available_rates_show(struct seq_file *s,
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+DEFINE_SHOW_ATTRIBUTE(tegra124_emc_debug_available_rates);
-static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+static int tegra124_emc_debug_min_rate_get(void *data, u64 *rate)
{
struct tegra_emc *emc = data;
@@ -1184,12 +1182,12 @@ static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
return 0;
}
-static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+static int tegra124_emc_debug_min_rate_set(void *data, u64 rate)
{
struct tegra_emc *emc = data;
int err;
- if (!tegra_emc_validate_rate(emc, rate))
+ if (!tegra124_emc_validate_rate(emc, rate))
return -EINVAL;
err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
@@ -1201,11 +1199,11 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
- tegra_emc_debug_min_rate_get,
- tegra_emc_debug_min_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(tegra124_emc_debug_min_rate_fops,
+ tegra124_emc_debug_min_rate_get,
+ tegra124_emc_debug_min_rate_set, "%llu\n");
-static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+static int tegra124_emc_debug_max_rate_get(void *data, u64 *rate)
{
struct tegra_emc *emc = data;
@@ -1214,12 +1212,12 @@ static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
return 0;
}
-static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+static int tegra124_emc_debug_max_rate_set(void *data, u64 rate)
{
struct tegra_emc *emc = data;
int err;
- if (!tegra_emc_validate_rate(emc, rate))
+ if (!tegra124_emc_validate_rate(emc, rate))
return -EINVAL;
err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
@@ -1231,9 +1229,9 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
- tegra_emc_debug_max_rate_get,
- tegra_emc_debug_max_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(tegra124_emc_debug_max_rate_fops,
+ tegra124_emc_debug_max_rate_get,
+ tegra124_emc_debug_max_rate_set, "%llu\n");
static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
{
@@ -1268,11 +1266,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
emc->debugfs.root = debugfs_create_dir("emc", NULL);
debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
- &tegra_emc_debug_available_rates_fops);
+ &tegra124_emc_debug_available_rates_fops);
debugfs_create_file("min_rate", 0644, emc->debugfs.root,
- emc, &tegra_emc_debug_min_rate_fops);
+ emc, &tegra124_emc_debug_min_rate_fops);
debugfs_create_file("max_rate", 0644, emc->debugfs.root,
- emc, &tegra_emc_debug_max_rate_fops);
+ emc, &tegra124_emc_debug_max_rate_fops);
}
static inline struct tegra_emc *
@@ -1336,7 +1334,7 @@ static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
-static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+static int tegra124_emc_interconnect_init(struct tegra_emc *emc)
{
const struct tegra_mc_soc *soc = emc->mc->soc;
struct icc_node *node;
@@ -1352,10 +1350,8 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
- if (IS_ERR(node)) {
- err = PTR_ERR(node);
- goto err_msg;
- }
+ if (IS_ERR(node))
+ return PTR_ERR(node);
node->name = "External Memory Controller";
icc_node_add(node, &emc->provider);
@@ -1383,30 +1379,28 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
remove_nodes:
icc_nodes_remove(&emc->provider);
-err_msg:
- dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
- return err;
+ return dev_err_probe(emc->dev, err, "failed to initialize ICC\n");
}
-static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+static int tegra124_emc_opp_table_init(struct tegra_emc *emc)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
int opp_token, err;
err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
- if (err < 0) {
- dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(emc->dev, err, "failed to set OPP supported HW\n");
+
opp_token = err;
err = dev_pm_opp_of_add_table(emc->dev);
if (err) {
if (err == -ENODEV)
- dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ dev_err_probe(emc->dev, err,
+ "OPP table not found, please update your device tree\n");
else
- dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+ dev_err_probe(emc->dev, err, "failed to add OPP table\n");
goto put_hw_table;
}
@@ -1417,7 +1411,7 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
/* first dummy rate-set initializes voltage state */
err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
if (err) {
- dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ dev_err_probe(emc->dev, err, "failed to initialize OPP clock\n");
goto remove_table;
}
@@ -1431,12 +1425,12 @@ put_hw_table:
return err;
}
-static void devm_tegra_emc_unset_callback(void *data)
+static void devm_tegra124_emc_unset_callback(void *data)
{
tegra124_clk_set_emc_callbacks(NULL, NULL);
}
-static int tegra_emc_probe(struct platform_device *pdev)
+static int tegra124_emc_probe(struct platform_device *pdev)
{
struct device_node *np;
struct tegra_emc *emc;
@@ -1460,9 +1454,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
ram_code = tegra_read_ram_code();
- np = tegra_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code);
+ np = tegra124_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code);
if (np) {
- err = tegra_emc_load_timings_from_dt(emc, np);
+ err = tegra124_emc_load_timings_from_dt(emc, np);
of_node_put(np);
if (err)
return err;
@@ -1472,39 +1466,33 @@ static int tegra_emc_probe(struct platform_device *pdev)
ram_code);
}
- err = emc_init(emc);
- if (err) {
- dev_err(&pdev->dev, "EMC initialization failed: %d\n", err);
- return err;
- }
+ emc_init(emc);
platform_set_drvdata(pdev, emc);
- tegra124_clk_set_emc_callbacks(tegra_emc_prepare_timing_change,
- tegra_emc_complete_timing_change);
+ tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change,
+ tegra124_emc_complete_timing_change);
- err = devm_add_action_or_reset(&pdev->dev, devm_tegra_emc_unset_callback,
+ err = devm_add_action_or_reset(&pdev->dev, devm_tegra124_emc_unset_callback,
NULL);
if (err)
return err;
emc->clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
- return err;
- }
+ if (IS_ERR(emc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(emc->clk),
+ "failed to get EMC clock\n");
- err = tegra_emc_opp_table_init(emc);
+ err = tegra124_emc_opp_table_init(emc);
if (err)
return err;
- tegra_emc_rate_requests_init(emc);
+ tegra124_emc_rate_requests_init(emc);
if (IS_ENABLED(CONFIG_DEBUG_FS))
emc_debugfs_init(&pdev->dev, emc);
- tegra_emc_interconnect_init(emc);
+ tegra124_emc_interconnect_init(emc);
/*
* Don't allow the kernel module to be unloaded. Unloading adds some
@@ -1516,16 +1504,16 @@ static int tegra_emc_probe(struct platform_device *pdev)
return 0;
};
-static struct platform_driver tegra_emc_driver = {
- .probe = tegra_emc_probe,
+static struct platform_driver tegra124_emc_driver = {
+ .probe = tegra124_emc_probe,
.driver = {
.name = "tegra-emc",
- .of_match_table = tegra_emc_of_match,
+ .of_match_table = tegra124_emc_of_match,
.suppress_bind_attrs = true,
.sync_state = icc_sync_state,
},
};
-module_platform_driver(tegra_emc_driver);
+module_platform_driver(tegra124_emc_driver);
MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra124 EMC driver");
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index d6cd90c7ad53..dfddceecdd1a 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -218,20 +218,20 @@ static int tegra186_emc_get_emc_dvfs_latency(struct tegra186_emc *emc)
}
/*
- * tegra_emc_icc_set_bw() - Set BW api for EMC provider
+ * tegra186_emc_icc_set_bw() - Set BW api for EMC provider
* @src: ICC node for External Memory Controller (EMC)
* @dst: ICC node for External Memory (DRAM)
*
* Do nothing here as info to BPMP-FW is now passed in the BW set function
* of the MC driver. BPMP-FW sets the final Freq based on the passed values.
*/
-static int tegra_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst)
+static int tegra186_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst)
{
return 0;
}
static struct icc_node *
-tegra_emc_of_icc_xlate(const struct of_phandle_args *spec, void *data)
+tegra186_emc_of_icc_xlate(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node *node;
@@ -247,7 +247,7 @@ tegra_emc_of_icc_xlate(const struct of_phandle_args *spec, void *data)
return ERR_PTR(-EPROBE_DEFER);
}
-static int tegra_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
+static int tegra186_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
{
*avg = 0;
*peak = 0;
@@ -255,7 +255,7 @@ static int tegra_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
return 0;
}
-static int tegra_emc_interconnect_init(struct tegra186_emc *emc)
+static int tegra186_emc_interconnect_init(struct tegra186_emc *emc)
{
struct tegra_mc *mc = dev_get_drvdata(emc->dev->parent);
const struct tegra_mc_soc *soc = mc->soc;
@@ -263,20 +263,18 @@ static int tegra_emc_interconnect_init(struct tegra186_emc *emc)
int err;
emc->provider.dev = emc->dev;
- emc->provider.set = tegra_emc_icc_set_bw;
+ emc->provider.set = tegra186_emc_icc_set_bw;
emc->provider.data = &emc->provider;
emc->provider.aggregate = soc->icc_ops->aggregate;
- emc->provider.xlate = tegra_emc_of_icc_xlate;
- emc->provider.get_bw = tegra_emc_icc_get_init_bw;
+ emc->provider.xlate = tegra186_emc_of_icc_xlate;
+ emc->provider.get_bw = tegra186_emc_icc_get_init_bw;
icc_provider_init(&emc->provider);
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
- if (IS_ERR(node)) {
- err = PTR_ERR(node);
- goto err_msg;
- }
+ if (IS_ERR(node))
+ return PTR_ERR(node);
node->name = "External Memory Controller";
icc_node_add(node, &emc->provider);
@@ -304,10 +302,8 @@ static int tegra_emc_interconnect_init(struct tegra186_emc *emc)
remove_nodes:
icc_nodes_remove(&emc->provider);
-err_msg:
- dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
- return err;
+ return dev_err_probe(emc->dev, err, "failed to initialize ICC\n");
}
static int tegra186_emc_probe(struct platform_device *pdev)
@@ -322,12 +318,13 @@ static int tegra186_emc_probe(struct platform_device *pdev)
emc->bpmp = tegra_bpmp_get(&pdev->dev);
if (IS_ERR(emc->bpmp))
- return dev_err_probe(&pdev->dev, PTR_ERR(emc->bpmp), "failed to get BPMP\n");
+ return dev_err_probe(&pdev->dev, PTR_ERR(emc->bpmp),
+ "failed to get BPMP\n");
emc->clk = devm_clk_get(&pdev->dev, "emc");
if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+ err = dev_err_probe(&pdev->dev, PTR_ERR(emc->clk),
+ "failed to get EMC clock\n");
goto put_bpmp;
}
@@ -359,7 +356,7 @@ static int tegra186_emc_probe(struct platform_device *pdev)
* EINVAL instead of passing the request to BPMP-FW later when the BW
* request is made by client with 'icc_set_bw()' call.
*/
- err = tegra_emc_interconnect_init(emc);
+ err = tegra186_emc_interconnect_init(emc);
if (err) {
mc->bpmp = NULL;
goto put_bpmp;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 44ac55feacd3..398cb8ae2e38 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -232,7 +232,7 @@ struct tegra_emc {
bool mrr_error;
};
-static irqreturn_t tegra_emc_isr(int irq, void *data)
+static irqreturn_t tegra20_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
@@ -253,8 +253,8 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
- unsigned long rate)
+static struct emc_timing *tegra20_emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
{
struct emc_timing *timing = NULL;
unsigned int i;
@@ -276,7 +276,7 @@ static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
{
- struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ struct emc_timing *timing = tegra20_emc_find_timing(emc, rate);
unsigned int i;
if (!timing)
@@ -321,8 +321,8 @@ static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
return 0;
}
-static int tegra_emc_clk_change_notify(struct notifier_block *nb,
- unsigned long msg, void *data)
+static int tegra20_emc_clk_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
{
struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
struct clk_notifier_data *cnd = data;
@@ -407,8 +407,8 @@ static int cmp_timings(const void *_a, const void *_b)
return 0;
}
-static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
- struct device_node *node)
+static int tegra20_emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
{
struct emc_timing *timing;
int child_count;
@@ -452,7 +452,7 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
}
static struct device_node *
-tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
+tegra20_emc_find_node_by_ram_code(struct tegra_emc *emc)
{
struct device *dev = emc->dev;
struct device_node *np;
@@ -710,7 +710,7 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
-static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+static void tegra20_emc_rate_requests_init(struct tegra_emc *emc)
{
unsigned int i;
@@ -812,7 +812,7 @@ static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
* valid range.
*/
-static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+static bool tegra20_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
{
unsigned int i;
@@ -823,7 +823,7 @@ static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
return false;
}
-static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+static int tegra20_emc_debug_available_rates_show(struct seq_file *s, void *data)
{
struct tegra_emc *emc = s->private;
const char *prefix = "";
@@ -838,9 +838,9 @@ static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+DEFINE_SHOW_ATTRIBUTE(tegra20_emc_debug_available_rates);
-static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+static int tegra20_emc_debug_min_rate_get(void *data, u64 *rate)
{
struct tegra_emc *emc = data;
@@ -849,12 +849,12 @@ static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
return 0;
}
-static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+static int tegra20_emc_debug_min_rate_set(void *data, u64 rate)
{
struct tegra_emc *emc = data;
int err;
- if (!tegra_emc_validate_rate(emc, rate))
+ if (!tegra20_emc_validate_rate(emc, rate))
return -EINVAL;
err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
@@ -866,11 +866,11 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
- tegra_emc_debug_min_rate_get,
- tegra_emc_debug_min_rate_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(tegra20_emc_debug_min_rate_fops,
+ tegra20_emc_debug_min_rate_get,
+ tegra20_emc_debug_min_rate_set, "%llu\n");
-static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+static int tegra20_emc_debug_max_rate_get(void *data, u64 *rate)
{
struct tegra_emc *emc = data;
@@ -879,12 +879,12 @@ static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
return 0;
}
-static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+static int tegra20_emc_debug_max_rate_set(void *data, u64 rate)
{
struct tegra_emc *emc = data;
int err;
- if (!tegra_emc_validate_rate(emc, rate))
+ if (!tegra20_emc_validate_rate(emc, rate))
return -EINVAL;
err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
@@ -896,11 +896,11 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
- tegra_emc_debug_max_rate_get,
- tegra_emc_debug_max_rate_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(tegra20_emc_debug_max_rate_fops,
+ tegra20_emc_debug_max_rate_get,
+ tegra20_emc_debug_max_rate_set, "%llu\n");
-static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+static void tegra20_emc_debugfs_init(struct tegra_emc *emc)
{
struct device *dev = emc->dev;
unsigned int i;
@@ -933,11 +933,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.root = debugfs_create_dir("emc", NULL);
debugfs_create_file("available_rates", 0444, emc->debugfs.root,
- emc, &tegra_emc_debug_available_rates_fops);
+ emc, &tegra20_emc_debug_available_rates_fops);
debugfs_create_file("min_rate", 0644, emc->debugfs.root,
- emc, &tegra_emc_debug_min_rate_fops);
+ emc, &tegra20_emc_debug_min_rate_fops);
debugfs_create_file("max_rate", 0644, emc->debugfs.root,
- emc, &tegra_emc_debug_max_rate_fops);
+ emc, &tegra20_emc_debug_max_rate_fops);
}
static inline struct tegra_emc *
@@ -1000,7 +1000,7 @@ static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
-static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+static int tegra20_emc_interconnect_init(struct tegra_emc *emc)
{
const struct tegra_mc_soc *soc;
struct icc_node *node;
@@ -1022,10 +1022,8 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
- if (IS_ERR(node)) {
- err = PTR_ERR(node);
- goto err_msg;
- }
+ if (IS_ERR(node))
+ return PTR_ERR(node);
node->name = "External Memory Controller";
icc_node_add(node, &emc->provider);
@@ -1053,57 +1051,52 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
remove_nodes:
icc_nodes_remove(&emc->provider);
-err_msg:
- dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
- return err;
+ return dev_err_probe(emc->dev, err, "failed to initialize ICC\n");
}
-static void devm_tegra_emc_unset_callback(void *data)
+static void devm_tegra20_emc_unset_callback(void *data)
{
tegra20_clk_set_emc_round_callback(NULL, NULL);
}
-static void devm_tegra_emc_unreg_clk_notifier(void *data)
+static void devm_tegra20_emc_unreg_clk_notifier(void *data)
{
struct tegra_emc *emc = data;
clk_notifier_unregister(emc->clk, &emc->clk_nb);
}
-static int tegra_emc_init_clk(struct tegra_emc *emc)
+static int tegra20_emc_init_clk(struct tegra_emc *emc)
{
int err;
tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
- err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ err = devm_add_action_or_reset(emc->dev, devm_tegra20_emc_unset_callback,
NULL);
if (err)
return err;
emc->clk = devm_clk_get(emc->dev, NULL);
- if (IS_ERR(emc->clk)) {
- dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
- return PTR_ERR(emc->clk);
- }
+ if (IS_ERR(emc->clk))
+ return dev_err_probe(emc->dev, PTR_ERR(emc->clk),
+ "failed to get EMC clock\n");
err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(emc->dev, err, "failed to register clk notifier\n");
err = devm_add_action_or_reset(emc->dev,
- devm_tegra_emc_unreg_clk_notifier, emc);
+ devm_tegra20_emc_unreg_clk_notifier, emc);
if (err)
return err;
return 0;
}
-static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
- u32 flags)
+static int tegra20_emc_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
{
struct tegra_emc *emc = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
@@ -1121,8 +1114,8 @@ static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
}
-static int tegra_emc_devfreq_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
+static int tegra20_emc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
{
struct tegra_emc *emc = dev_get_drvdata(dev);
@@ -1144,13 +1137,13 @@ static int tegra_emc_devfreq_get_dev_status(struct device *dev,
return 0;
}
-static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
+static struct devfreq_dev_profile tegra20_emc_devfreq_profile = {
.polling_ms = 30,
- .target = tegra_emc_devfreq_target,
- .get_dev_status = tegra_emc_devfreq_get_dev_status,
+ .target = tegra20_emc_devfreq_target,
+ .get_dev_status = tegra20_emc_devfreq_get_dev_status,
};
-static int tegra_emc_devfreq_init(struct tegra_emc *emc)
+static int tegra20_emc_devfreq_init(struct tegra_emc *emc)
{
struct devfreq *devfreq;
@@ -1172,18 +1165,17 @@ static int tegra_emc_devfreq_init(struct tegra_emc *emc)
writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
- devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
+ devfreq = devm_devfreq_add_device(emc->dev, &tegra20_emc_devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&emc->ondemand_data);
- if (IS_ERR(devfreq)) {
- dev_err(emc->dev, "failed to initialize devfreq: %pe", devfreq);
- return PTR_ERR(devfreq);
- }
+ if (IS_ERR(devfreq))
+ return dev_err_probe(emc->dev, PTR_ERR(devfreq),
+ "failed to initialize devfreq\n");
return 0;
}
-static int tegra_emc_probe(struct platform_device *pdev)
+static int tegra20_emc_probe(struct platform_device *pdev)
{
struct tegra_core_opp_params opp_params = {};
struct device_node *np;
@@ -1199,7 +1191,7 @@ static int tegra_emc_probe(struct platform_device *pdev)
return -ENOMEM;
mutex_init(&emc->rate_lock);
- emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
+ emc->clk_nb.notifier_call = tegra20_emc_clk_change_notify;
emc->dev = &pdev->dev;
emc->regs = devm_platform_ioremap_resource(pdev, 0);
@@ -1210,22 +1202,22 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
- np = tegra_emc_find_node_by_ram_code(emc);
+ np = tegra20_emc_find_node_by_ram_code(emc);
if (np) {
- err = tegra_emc_load_timings_from_dt(emc, np);
+ err = tegra20_emc_load_timings_from_dt(emc, np);
of_node_put(np);
if (err)
return err;
}
- err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
+ err = devm_request_irq(&pdev->dev, irq, tegra20_emc_isr, 0,
dev_name(&pdev->dev), emc);
if (err) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
return err;
}
- err = tegra_emc_init_clk(emc);
+ err = tegra20_emc_init_clk(emc);
if (err)
return err;
@@ -1236,10 +1228,10 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
platform_set_drvdata(pdev, emc);
- tegra_emc_rate_requests_init(emc);
- tegra_emc_debugfs_init(emc);
- tegra_emc_interconnect_init(emc);
- tegra_emc_devfreq_init(emc);
+ tegra20_emc_rate_requests_init(emc);
+ tegra20_emc_debugfs_init(emc);
+ tegra20_emc_interconnect_init(emc);
+ tegra20_emc_devfreq_init(emc);
/*
* Don't allow the kernel module to be unloaded. Unloading adds some
@@ -1251,22 +1243,22 @@ static int tegra_emc_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra_emc_of_match[] = {
+static const struct of_device_id tegra20_emc_of_match[] = {
{ .compatible = "nvidia,tegra20-emc", },
{},
};
-MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+MODULE_DEVICE_TABLE(of, tegra20_emc_of_match);
-static struct platform_driver tegra_emc_driver = {
- .probe = tegra_emc_probe,
+static struct platform_driver tegra20_emc_driver = {
+ .probe = tegra20_emc_probe,
.driver = {
.name = "tegra20-emc",
- .of_match_table = tegra_emc_of_match,
+ .of_match_table = tegra20_emc_of_match,
.suppress_bind_attrs = true,
.sync_state = icc_sync_state,
},
};
-module_platform_driver(tegra_emc_driver);
+module_platform_driver(tegra20_emc_driver);
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 921dce1b8bc6..914116d8ec16 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -413,7 +413,7 @@ static int emc_seq_update_timing(struct tegra_emc *emc)
return 0;
}
-static irqreturn_t tegra_emc_isr(int irq, void *data)
+static irqreturn_t tegra30_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
@@ -1228,7 +1228,7 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
-static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+static void tegra30_emc_rate_requests_init(struct tegra_emc *emc)
{
unsigned int i;
@@ -1330,7 +1330,7 @@ static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
* valid range.
*/
-static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+static bool tegra30_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
{
unsigned int i;
@@ -1341,7 +1341,7 @@ static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
return false;
}
-static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+static int tegra30_emc_debug_available_rates_show(struct seq_file *s, void *data)
{
struct tegra_emc *emc = s->private;
const char *prefix = "";
@@ -1356,9 +1356,9 @@ static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+DEFINE_SHOW_ATTRIBUTE(tegra30_emc_debug_available_rates);
-static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+static int tegra30_emc_debug_min_rate_get(void *data, u64 *rate)
{
struct tegra_emc *emc = data;
@@ -1367,12 +1367,12 @@ static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
return 0;
}
-static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+static int tegra30_emc_debug_min_rate_set(void *data, u64 rate)
{
struct tegra_emc *emc = data;
int err;
- if (!tegra_emc_validate_rate(emc, rate))
+ if (!tegra30_emc_validate_rate(emc, rate))
return -EINVAL;
err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
@@ -1384,11 +1384,11 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
- tegra_emc_debug_min_rate_get,
- tegra_emc_debug_min_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(tegra30_emc_debug_min_rate_fops,
+ tegra30_emc_debug_min_rate_get,
+ tegra30_emc_debug_min_rate_set, "%llu\n");
-static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+static int tegra30_emc_debug_max_rate_get(void *data, u64 *rate)
{
struct tegra_emc *emc = data;
@@ -1397,12 +1397,12 @@ static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
return 0;
}
-static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+static int tegra30_emc_debug_max_rate_set(void *data, u64 rate)
{
struct tegra_emc *emc = data;
int err;
- if (!tegra_emc_validate_rate(emc, rate))
+ if (!tegra30_emc_validate_rate(emc, rate))
return -EINVAL;
err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
@@ -1414,11 +1414,11 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
- tegra_emc_debug_max_rate_get,
- tegra_emc_debug_max_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(tegra30_emc_debug_max_rate_fops,
+ tegra30_emc_debug_max_rate_get,
+ tegra30_emc_debug_max_rate_set, "%llu\n");
-static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+static void tegra30_emc_debugfs_init(struct tegra_emc *emc)
{
struct device *dev = emc->dev;
unsigned int i;
@@ -1451,11 +1451,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.root = debugfs_create_dir("emc", NULL);
debugfs_create_file("available_rates", 0444, emc->debugfs.root,
- emc, &tegra_emc_debug_available_rates_fops);
+ emc, &tegra30_emc_debug_available_rates_fops);
debugfs_create_file("min_rate", 0644, emc->debugfs.root,
- emc, &tegra_emc_debug_min_rate_fops);
+ emc, &tegra30_emc_debug_min_rate_fops);
debugfs_create_file("max_rate", 0644, emc->debugfs.root,
- emc, &tegra_emc_debug_max_rate_fops);
+ emc, &tegra30_emc_debug_max_rate_fops);
}
static inline struct tegra_emc *
@@ -1518,7 +1518,7 @@ static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
-static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+static int tegra30_emc_interconnect_init(struct tegra_emc *emc)
{
const struct tegra_mc_soc *soc = emc->mc->soc;
struct icc_node *node;
@@ -1534,10 +1534,8 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
- if (IS_ERR(node)) {
- err = PTR_ERR(node);
- goto err_msg;
- }
+ if (IS_ERR(node))
+ return PTR_ERR(node);
node->name = "External Memory Controller";
icc_node_add(node, &emc->provider);
@@ -1565,56 +1563,51 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
remove_nodes:
icc_nodes_remove(&emc->provider);
-err_msg:
- dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
- return err;
+ return dev_err_probe(emc->dev, err, "failed to initialize ICC\n");
}
-static void devm_tegra_emc_unset_callback(void *data)
+static void devm_tegra30_emc_unset_callback(void *data)
{
tegra20_clk_set_emc_round_callback(NULL, NULL);
}
-static void devm_tegra_emc_unreg_clk_notifier(void *data)
+static void devm_tegra30_emc_unreg_clk_notifier(void *data)
{
struct tegra_emc *emc = data;
clk_notifier_unregister(emc->clk, &emc->clk_nb);
}
-static int tegra_emc_init_clk(struct tegra_emc *emc)
+static int tegra30_emc_init_clk(struct tegra_emc *emc)
{
int err;
tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
- err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ err = devm_add_action_or_reset(emc->dev, devm_tegra30_emc_unset_callback,
NULL);
if (err)
return err;
emc->clk = devm_clk_get(emc->dev, NULL);
- if (IS_ERR(emc->clk)) {
- dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
- return PTR_ERR(emc->clk);
- }
+ if (IS_ERR(emc->clk))
+ return dev_err_probe(emc->dev, PTR_ERR(emc->clk),
+ "failed to get EMC clock\n");
err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(emc->dev, err, "failed to register clk notifier\n");
err = devm_add_action_or_reset(emc->dev,
- devm_tegra_emc_unreg_clk_notifier, emc);
+ devm_tegra30_emc_unreg_clk_notifier, emc);
if (err)
return err;
return 0;
}
-static int tegra_emc_probe(struct platform_device *pdev)
+static int tegra30_emc_probe(struct platform_device *pdev)
{
struct tegra_core_opp_params opp_params = {};
struct device_node *np;
@@ -1655,14 +1648,12 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->irq = err;
- err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
+ err = devm_request_irq(&pdev->dev, emc->irq, tegra30_emc_isr, 0,
dev_name(&pdev->dev), emc);
- if (err) {
- dev_err(&pdev->dev, "failed to request irq: %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to request irq\n");
- err = tegra_emc_init_clk(emc);
+ err = tegra30_emc_init_clk(emc);
if (err)
return err;
@@ -1673,9 +1664,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
platform_set_drvdata(pdev, emc);
- tegra_emc_rate_requests_init(emc);
- tegra_emc_debugfs_init(emc);
- tegra_emc_interconnect_init(emc);
+ tegra30_emc_rate_requests_init(emc);
+ tegra30_emc_debugfs_init(emc);
+ tegra30_emc_interconnect_init(emc);
/*
* Don't allow the kernel module to be unloaded. Unloading adds some
@@ -1687,7 +1678,7 @@ static int tegra_emc_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_emc_suspend(struct device *dev)
+static int tegra30_emc_suspend(struct device *dev)
{
struct tegra_emc *emc = dev_get_drvdata(dev);
int err;
@@ -1708,7 +1699,7 @@ static int tegra_emc_suspend(struct device *dev)
return 0;
}
-static int tegra_emc_resume(struct device *dev)
+static int tegra30_emc_resume(struct device *dev)
{
struct tegra_emc *emc = dev_get_drvdata(dev);
@@ -1720,28 +1711,28 @@ static int tegra_emc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops tegra_emc_pm_ops = {
- .suspend = tegra_emc_suspend,
- .resume = tegra_emc_resume,
+static const struct dev_pm_ops tegra30_emc_pm_ops = {
+ .suspend = tegra30_emc_suspend,
+ .resume = tegra30_emc_resume,
};
-static const struct of_device_id tegra_emc_of_match[] = {
+static const struct of_device_id tegra30_emc_of_match[] = {
{ .compatible = "nvidia,tegra30-emc", },
{},
};
-MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+MODULE_DEVICE_TABLE(of, tegra30_emc_of_match);
-static struct platform_driver tegra_emc_driver = {
- .probe = tegra_emc_probe,
+static struct platform_driver tegra30_emc_driver = {
+ .probe = tegra30_emc_probe,
.driver = {
.name = "tegra30-emc",
- .of_match_table = tegra_emc_of_match,
- .pm = &tegra_emc_pm_ops,
+ .of_match_table = tegra30_emc_of_match,
+ .pm = &tegra30_emc_pm_ops,
.suppress_bind_attrs = true,
.sync_state = icc_sync_state,
},
};
-module_platform_driver(tegra_emc_driver);
+module_platform_driver(tegra30_emc_driver);
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 738bc4e60a18..e60a8d3947c9 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1857,7 +1857,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
ioc->reset_work_q =
- alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id);
+ alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+ ioc->id);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
@@ -1984,7 +1985,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->fw_event_list);
spin_lock_init(&ioc->fw_event_lock);
- ioc->fw_event_q = alloc_workqueue("mpt/%d", WQ_MEM_RECLAIM, 0, ioc->id);
+ ioc->fw_event_q = alloc_workqueue("mpt/%d",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+ ioc->id);
if (!ioc->fw_event_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
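
The WQ_PERCPU additions here (and the system_wq to system_percpu_wq switch in the nvdimm/security.c hunks further down) appear to be part of the tree-wide effort to make per-CPU workqueue behavior an explicit opt-in as unbound workqueues become the default; callers that relied on the old implicit per-CPU semantics are annotated so behavior is preserved across the transition.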
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b9c11f67315f..9d1de68dee27 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -106,7 +106,7 @@ config PHANTOM
config RPMB
tristate "RPMB partition interface"
- depends on MMC
+ depends on MMC || SCSI_UFSHCD
help
Unified RPMB unit interface for RPMB capable devices such as eMMC and
UFS. Provides interface for in-kernel security controllers to access
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index fde3e17c836c..44ab929a1ad5 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -97,6 +97,25 @@ config OF_PMEM
Select Y if unsure.
+config RAMDAX
+ tristate "Support persistent memory interfaces on RAM carveouts"
+ depends on X86_PMEM_LEGACY || OF || COMPILE_TEST
+ default LIBNVDIMM
+ help
+ Allows creation of DAX devices on RAM carveouts.
+
+ Memory ranges specified manually with the 'memmap=nn[KMG]!ss[KMG]'
+ kernel command line option, or described by dummy pmem-region device
+ tree nodes, are managed by this driver as DIMM devices that support
+ a dynamic layout of namespaces.
+ The driver reserves 128K at the end of each range for namespace
+ management, which allows up to 509 namespaces
+ (see 'ndctl create-namespace --help').
+ The driver should be force-bound to e820_pmem or pmem-region platform
+ devices using the 'driver_override' device attribute.
+
+ Select N if unsure.
+
config NVDIMM_KEYS
def_bool y
depends on ENCRYPTED_KEYS
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index ba0296dca9db..8c268814936c 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
+obj-$(CONFIG_RAMDAX) += ramdax.o
nd_pmem-y := pmem.o
diff --git a/drivers/nvdimm/ramdax.c b/drivers/nvdimm/ramdax.c
new file mode 100644
index 000000000000..954cb7919807
--- /dev/null
+++ b/drivers/nvdimm/ramdax.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Mike Rapoport, Microsoft
+ *
+ * Based on e820 pmem driver:
+ * Copyright (c) 2015, Christoph Hellwig.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+#include <linux/platform_device.h>
+#include <linux/memory_hotplug.h>
+#include <linux/libnvdimm.h>
+#include <linux/module.h>
+#include <linux/numa.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <uapi/linux/ndctl.h>
+
+#define LABEL_AREA_SIZE SZ_128K
+
+struct ramdax_dimm {
+ struct nvdimm *nvdimm;
+ void *label_area;
+};
+
+static void ramdax_remove(struct platform_device *pdev)
+{
+ struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
+
+ nvdimm_bus_unregister(nvdimm_bus);
+}
+
+static int ramdax_register_region(struct resource *res,
+ struct nvdimm *nvdimm,
+ struct nvdimm_bus *nvdimm_bus)
+{
+ struct nd_mapping_desc mapping;
+ struct nd_region_desc ndr_desc;
+ struct nd_interleave_set *nd_set;
+ int nid = phys_to_target_node(res->start);
+
+ nd_set = kzalloc(sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set)
+ return -ENOMEM;
+
+ nd_set->cookie1 = 0xcafebeefcafebeef;
+ nd_set->cookie2 = nd_set->cookie1;
+ nd_set->altcookie = nd_set->cookie1;
+
+ memset(&mapping, 0, sizeof(mapping));
+ mapping.nvdimm = nvdimm;
+ mapping.start = 0;
+ mapping.size = resource_size(res) - LABEL_AREA_SIZE;
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+ ndr_desc.res = res;
+ ndr_desc.numa_node = numa_map_to_online_node(nid);
+ ndr_desc.target_node = nid;
+ ndr_desc.num_mappings = 1;
+ ndr_desc.mapping = &mapping;
+ ndr_desc.nd_set = nd_set;
+
+ if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
+ goto err_free_nd_set;
+
+ return 0;
+
+err_free_nd_set:
+ kfree(nd_set);
+ return -ENXIO;
+}
+
+static int ramdax_register_dimm(struct resource *res, void *data)
+{
+ resource_size_t start = res->start;
+ resource_size_t size = resource_size(res);
+ unsigned long flags = 0, cmd_mask = 0;
+ struct nvdimm_bus *nvdimm_bus = data;
+ struct ramdax_dimm *dimm;
+ int err;
+
+ dimm = kzalloc(sizeof(*dimm), GFP_KERNEL);
+ if (!dimm)
+ return -ENOMEM;
+
+ dimm->label_area = memremap(start + size - LABEL_AREA_SIZE,
+ LABEL_AREA_SIZE, MEMREMAP_WB);
+ if (!dimm->label_area) {
+ err = -ENOMEM;
+ goto err_free_dimm;
+ }
+
+ set_bit(NDD_LABELING, &flags);
+ set_bit(NDD_REGISTER_SYNC, &flags);
+ set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+ set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+ set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+ dimm->nvdimm = nvdimm_create(nvdimm_bus, dimm,
+ /* dimm_attribute_groups */ NULL,
+ flags, cmd_mask, 0, NULL);
+ if (!dimm->nvdimm) {
+ err = -ENOMEM;
+ goto err_unmap_label;
+ }
+
+ err = ramdax_register_region(res, dimm->nvdimm, nvdimm_bus);
+ if (err)
+ goto err_remove_nvdimm;
+
+ return 0;
+
+err_remove_nvdimm:
+ nvdimm_delete(dimm->nvdimm);
+err_unmap_label:
+ memunmap(dimm->label_area);
+err_free_dimm:
+ kfree(dimm);
+ return err;
+}
+
+static int ramdax_get_config_size(struct nvdimm *nvdimm, int buf_len,
+ struct nd_cmd_get_config_size *cmd)
+{
+ if (sizeof(*cmd) > buf_len)
+ return -EINVAL;
+
+ *cmd = (struct nd_cmd_get_config_size){
+ .status = 0,
+ .config_size = LABEL_AREA_SIZE,
+ .max_xfer = 8,
+ };
+
+ return 0;
+}
+
+static int ramdax_get_config_data(struct nvdimm *nvdimm, int buf_len,
+ struct nd_cmd_get_config_data_hdr *cmd)
+{
+ struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ if (sizeof(*cmd) > buf_len)
+ return -EINVAL;
+ if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
+ return -EINVAL;
+ if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE)
+ return -EINVAL;
+
+ memcpy(cmd->out_buf, dimm->label_area + cmd->in_offset, cmd->in_length);
+
+ return 0;
+}
+
+static int ramdax_set_config_data(struct nvdimm *nvdimm, int buf_len,
+ struct nd_cmd_set_config_hdr *cmd)
+{
+ struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ if (sizeof(*cmd) > buf_len)
+ return -EINVAL;
+ if (struct_size(cmd, in_buf, cmd->in_length) > buf_len)
+ return -EINVAL;
+ if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE)
+ return -EINVAL;
+
+ memcpy(dimm->label_area + cmd->in_offset, cmd->in_buf, cmd->in_length);
+
+ return 0;
+}
+
+static int ramdax_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
+ void *buf, unsigned int buf_len)
+{
+ unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
+
+ if (!test_bit(cmd, &cmd_mask))
+ return -ENOTTY;
+
+ switch (cmd) {
+ case ND_CMD_GET_CONFIG_SIZE:
+ return ramdax_get_config_size(nvdimm, buf_len, buf);
+ case ND_CMD_GET_CONFIG_DATA:
+ return ramdax_get_config_data(nvdimm, buf_len, buf);
+ case ND_CMD_SET_CONFIG_DATA:
+ return ramdax_set_config_data(nvdimm, buf_len, buf);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int ramdax_ctl(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ /*
+ * No firmware response to translate, let the transport error
+ * code take precedence.
+ */
+ *cmd_rc = 0;
+
+ if (!nvdimm)
+ return -ENOTTY;
+ return ramdax_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ramdax_of_matches[] = {
+ { .compatible = "pmem-region", },
+ { },
+};
+#endif
+
+static int ramdax_probe_of(struct platform_device *pdev,
+ struct nvdimm_bus *bus, struct device_node *np)
+{
+ int err;
+
+ if (!of_match_node(ramdax_of_matches, np))
+ return -ENODEV;
+
+ for (int i = 0; i < pdev->num_resources; i++) {
+ err = ramdax_register_dimm(&pdev->resource[i], bus);
+ if (err)
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ /*
+ * FIXME: should we unregister the dimms that were registered
+ * successfully?
+ */
+ return err;
+}
+
+static int ramdax_probe(struct platform_device *pdev)
+{
+ static struct nvdimm_bus_descriptor nd_desc;
+ struct device *dev = &pdev->dev;
+ struct nvdimm_bus *nvdimm_bus;
+ struct device_node *np;
+ int rc = -ENXIO;
+
+ nd_desc.provider_name = "ramdax";
+ nd_desc.module = THIS_MODULE;
+ nd_desc.ndctl = ramdax_ctl;
+ nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
+ if (!nvdimm_bus)
+ return -ENXIO;
+
+ np = dev_of_node(&pdev->dev);
+ if (np)
+ rc = ramdax_probe_of(pdev, nvdimm_bus, np);
+ else
+ rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ IORESOURCE_MEM, 0, -1, nvdimm_bus,
+ ramdax_register_dimm);
+ if (rc)
+ goto err;
+
+ platform_set_drvdata(pdev, nvdimm_bus);
+
+ return 0;
+err:
+ nvdimm_bus_unregister(nvdimm_bus);
+ return rc;
+}
+
+static struct platform_driver ramdax_driver = {
+ .probe = ramdax_probe,
+ .remove = ramdax_remove,
+ .driver = {
+ .name = "ramdax",
+ },
+};
+
+module_platform_driver(ramdax_driver);
+
+MODULE_DESCRIPTION("NVDIMM support for e820 type-12 memory and OF pmem-region");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Microsoft Corporation");
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index cd9b52040d7b..53567f3ed427 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -110,7 +110,7 @@ static void nd_region_remove(struct device *dev)
* here is ok.
*/
if (cpu_cache_has_invalidate_memregion())
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+ cpu_cache_invalidate_all();
}
static int child_notify(struct device *dev, void *data)
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5ceaf5db595..1220530a23b6 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -90,7 +90,7 @@ static int nd_region_invalidate_memregion(struct nd_region *nd_region)
}
}
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+ cpu_cache_invalidate_all();
out:
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4adce8c38870..e41f6951ca0f 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -424,7 +424,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
* query.
*/
get_device(dev);
- queue_delayed_work(system_wq, &nvdimm->dwork, 0);
+ queue_delayed_work(system_percpu_wq, &nvdimm->dwork, 0);
}
return rc;
@@ -457,7 +457,7 @@ static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
/* setup delayed work again */
tmo += 10;
- queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
+ queue_delayed_work(system_percpu_wq, &nvdimm->dwork, tmo * HZ);
nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
return;
}
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 1f51fbebd9fa..e07e7d4bf8b6 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -178,7 +178,7 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
if (!key)
return ERR_PTR(-ENOMEM);
- key_len = base64_decode(secret, allocated_len, key->key);
+ key_len = base64_decode(secret, allocated_len, key->key, true, BASE64_STD);
if (key_len < 0) {
pr_debug("base64 key decoding error %d\n",
key_len);
@@ -663,7 +663,7 @@ int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
if (ret)
goto out_free_digest;
- ret = base64_encode(digest, digest_len, enc);
+ ret = base64_encode(digest, digest_len, enc, true, BASE64_STD);
if (ret < hmac_len) {
ret = -ENOKEY;
goto out_free_digest;
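
The nvme-auth hunks track an extended lib/base64 interface: each call site gains two arguments, and from the values passed here (true, BASE64_STD) they appear to select padded encoding/decoding and the standard RFC 4648 alphabet. Inferred prototypes — a guess from these call sites only, including the enum's type name; see lib/base64 for the authoritative signatures:

	int base64_encode(const u8 *src, int srclen, char *dst,
			  bool padding, enum base64_variant variant);
	int base64_decode(const char *src, int srclen, u8 *dst,
			  bool padding, enum base64_variant variant);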
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index feef537257d0..4e7071714356 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -517,10 +517,10 @@ static u32 hint_lookup[] = {
* ccio_io_pdir_entry - Initialize an I/O Pdir.
* @pdir_ptr: A pointer into I/O Pdir.
* @sid: The Space Identifier.
- * @vba: The virtual address.
+ * @pba: The physical address.
* @hints: The DMA Hint.
*
- * Given a virtual address (vba, arg2) and space id, (sid, arg1),
+ * Given a physical address (pba, arg2) and space id, (sid, arg1),
* load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
* entry consists of 8 bytes as shown below (MSB == bit 0):
*
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
* index are bits 12:19 of the value returned by LCI.
*/
static void
-ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
unsigned long hints)
{
register unsigned long pa;
@@ -557,7 +557,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
** "hints" parm includes the VALID bit!
** "dep" clobbers the physical address offset bits as well.
*/
- pa = lpa(vba);
+ pa = pba;
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
((u32 *)pdir_ptr)[1] = (u32) pa;
@@ -582,7 +582,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
** Grab virtual index [0:11]
** Deposit virt_idx bits into I/O PDIR word
*/
- asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
@@ -704,14 +704,14 @@ ccio_dma_supported(struct device *dev, u64 mask)
/**
* ccio_map_single - Map an address range into the IOMMU.
* @dev: The PCI device.
- * @addr: The start address of the DMA region.
+ * @addr: The physical address of the DMA region.
* @size: The length of the DMA region.
* @direction: The direction of the DMA transaction (to/from device).
*
* This function implements the pci_map_single function.
*/
static dma_addr_t
-ccio_map_single(struct device *dev, void *addr, size_t size,
+ccio_map_single(struct device *dev, phys_addr_t addr, size_t size,
enum dma_data_direction direction)
{
int idx;
@@ -730,7 +730,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(size <= 0);
/* save offset bits */
- offset = ((unsigned long) addr) & ~IOVP_MASK;
+ offset = offset_in_page(addr);
/* round up to nearest IOVP_SIZE */
size = ALIGN(size + offset, IOVP_SIZE);
@@ -746,15 +746,15 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
pdir_start = &(ioc->pdir_base[idx]);
- DBG_RUN("%s() %px -> %#lx size: %zu\n",
- __func__, addr, (long)(iovp | offset), size);
+ DBG_RUN("%s() %pa -> %#lx size: %zu\n",
+ __func__, &addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
- if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
+ if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES))
hint |= HINT_SAFE_DMA;
while(size > 0) {
- ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
+ ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
DBG_RUN(" pdir %p %08x%08x\n",
pdir_start,
@@ -773,17 +773,18 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
-ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
{
- return ccio_map_single(dev, page_address(page) + offset, size,
- direction);
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
+ return ccio_map_single(dev, phys, size, direction);
}
/**
- * ccio_unmap_page - Unmap an address range from the IOMMU.
+ * ccio_unmap_phys - Unmap an address range from the IOMMU.
* @dev: The PCI device.
* @iova: The start address of the DMA region.
* @size: The length of the DMA region.
@@ -791,7 +792,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
* @attrs: attributes
*/
static void
-ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -853,7 +854,8 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
if (ret) {
memset(ret, 0, size);
- *dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
+ *dma_handle = ccio_map_single(dev, virt_to_phys(ret), size,
+ DMA_BIDIRECTIONAL);
}
return ret;
@@ -873,7 +875,7 @@ static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, unsigned long attrs)
{
- ccio_unmap_page(dev, dma_handle, size, 0, 0);
+ ccio_unmap_phys(dev, dma_handle, size, 0, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -920,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist) = ccio_map_single(dev,
- sg_virt(sglist), sglist->length,
+ sg_phys(sglist), sglist->length,
direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1004,7 +1006,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
- ccio_unmap_page(dev, sg_dma_address(sglist),
+ ccio_unmap_phys(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
nents--;
@@ -1017,8 +1019,8 @@ static const struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported,
.alloc = ccio_alloc,
.free = ccio_free,
- .map_page = ccio_map_page,
- .unmap_page = ccio_unmap_page,
+ .map_phys = ccio_map_phys,
+ .unmap_phys = ccio_unmap_phys,
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
@@ -1072,7 +1074,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
ioc->msingle_calls, ioc->msingle_pages,
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
- /* KLUGE - unmap_sg calls unmap_page for each mapped page */
+ /* KLUGE - unmap_sg calls unmap_phys for each mapped page */
min = ioc->usingle_calls - ioc->usg_calls;
max = ioc->usingle_pages - ioc->usg_pages;
seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index a0daaa548bc3..8ba778170447 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -154,7 +154,9 @@ static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;
/* switch IRQ's for devices below LASI/WAX to other CPU */
- gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
+ /* ASP chip (svers 0x70) does not support reprogramming */
+ if (gsc_dev->gsc->id.sversion != 0x70)
+ gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
irq_data_update_effective_affinity(d, &tmask);
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index c43f1a212a5c..0691884f5095 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -14,7 +14,7 @@
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint,
- void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
+ void (*iommu_io_pdir_entry)(__le64 *, space_t, phys_addr_t,
unsigned long))
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
@@ -28,7 +28,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
dma_sg--;
while (nents-- > 0) {
- unsigned long vaddr;
+ phys_addr_t paddr;
long size;
DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
@@ -67,7 +67,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
BUG_ON(pdirp == NULL);
- vaddr = (unsigned long)sg_virt(startsg);
+ paddr = sg_phys(startsg);
sg_dma_len(dma_sg) += startsg->length;
size = startsg->length + dma_offset;
dma_offset = 0;
@@ -76,8 +76,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
#endif
do {
iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
- vaddr, hint);
- vaddr += IOVP_SIZE;
+ paddr, hint);
+ paddr += IOVP_SIZE;
size -= IOVP_SIZE;
pdirp++;
} while(unlikely(size > 0));
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index fc3863c09f83..a6eb6bffa5ea 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -532,7 +532,7 @@ typedef unsigned long space_t;
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @sid: process Space ID - currently only support KERNEL_SPACE
- * @vba: Virtual CPU address of buffer to map
+ * @pba: Physical address of buffer to map
* @hint: DMA hint set to use for this mapping
*
* SBA Mapping Routine
@@ -569,20 +569,17 @@ typedef unsigned long space_t;
*/
static void
-sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
unsigned long hint)
{
- u64 pa; /* physical address */
register unsigned ci; /* coherent index */
- pa = lpa(vba);
- pa &= IOVP_MASK;
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
+ pba &= IOVP_MASK;
+ pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
- asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
- pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
-
- pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
- *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
+ pba |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+ *pdir_ptr = cpu_to_le64(pba); /* swap and store into I/O Pdir */
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
@@ -707,7 +704,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
* See Documentation/core-api/dma-api-howto.rst
*/
static dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size,
+sba_map_single(struct device *dev, phys_addr_t addr, size_t size,
enum dma_data_direction direction)
{
struct ioc *ioc;
@@ -722,7 +719,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
return DMA_MAPPING_ERROR;
/* save offset bits */
- offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
+ offset = offset_in_page(addr);
/* round up to nearest IOVP_SIZE */
size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
@@ -739,13 +736,13 @@ sba_map_single(struct device *dev, void *addr, size_t size,
pide = sba_alloc_range(ioc, dev, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
- DBG_RUN("%s() 0x%p -> 0x%lx\n",
- __func__, addr, (long) iovp | offset);
+ DBG_RUN("%s() 0x%pa -> 0x%lx\n",
+ __func__, &addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
while (size > 0) {
- sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
+ sba_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, 0);
DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
pdir_start,
@@ -778,17 +775,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
-sba_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+sba_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
{
- return sba_map_single(dev, page_address(page) + offset, size,
- direction);
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
+ return sba_map_single(dev, phys, size, direction);
}
/**
- * sba_unmap_page - unmap one IOVA and free resources
+ * sba_unmap_phys - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
@@ -798,7 +796,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
* See Documentation/core-api/dma-api-howto.rst
*/
static void
-sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -893,7 +891,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
if (ret) {
memset(ret, 0, size);
- *dma_handle = sba_map_single(hwdev, ret, size, 0);
+ *dma_handle = sba_map_single(hwdev, virt_to_phys(ret), size, 0);
}
return ret;
@@ -914,7 +912,7 @@ static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_page(hwdev, dma_handle, size, 0, 0);
+ sba_unmap_phys(hwdev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -962,7 +960,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
- sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
+ sg_dma_address(sglist) = sba_map_single(dev, sg_phys(sglist),
sglist->length, direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1061,7 +1059,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
while (nents && sg_dma_len(sglist)) {
- sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+ sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
@@ -1085,8 +1083,8 @@ static const struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported,
.alloc = sba_alloc,
.free = sba_free,
- .map_page = sba_map_page,
- .unmap_page = sba_unmap_page,
+ .map_phys = sba_map_phys,
+ .unmap_phys = sba_unmap_phys,
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f94f5d384362..00b0210e1f1d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -122,6 +122,24 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
+config PCI_IDE
+ bool
+
+config PCI_TSM
+ bool "PCI TSM: Device security protocol support"
+ select PCI_IDE
+ select PCI_DOE
+ select TSM
+ help
+ The TEE (Trusted Execution Environment) Device Interface
+ Security Protocol (TDISP) defines a "TSM" as a platform agent
+ that manages device authentication, link encryption, link
+ integrity protection, and assignment of PCI device functions
+ (virtual or physical) to confidential computing VMs that can
+ access (DMA) guest private memory.
+
+ Enable a platform TSM driver to use this capability.
+
config PCI_DOE
bool "Enable PCI Data Object Exchange (DOE) support"
help
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f3c81c892786..e10cfe5a280b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_PCI_DOE) += doe.o
+obj-$(CONFIG_PCI_IDE) += ide.o
+obj-$(CONFIG_PCI_TSM) += tsm.o
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
obj-$(CONFIG_PCI_NPEM) += npem.o
obj-$(CONFIG_PCIE_TPH) += tph.o
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 9daf13ed3714..4383a36fd6ca 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -435,6 +436,27 @@ static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void
return ret;
}
+static int __pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ struct pci_dev *dev;
+ int ret = 0;
+
+ list_for_each_entry_reverse(dev, &top->devices, bus_list) {
+ if (dev->subordinate) {
+ ret = __pci_walk_bus_reverse(dev->subordinate, cb,
+ userdata);
+ if (ret)
+ break;
+ }
+ ret = cb(dev, userdata);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
/**
* pci_walk_bus - walk devices on/under bus, calling callback.
* @top: bus whose devices should be walked
@@ -456,6 +478,23 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
+/**
+ * pci_walk_bus_reverse - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Same semantics as pci_walk_bus(), but walks the bus in reverse order.
+ */
+void pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ down_read(&pci_bus_sem);
+ __pci_walk_bus_reverse(top, cb, userdata);
+ up_read(&pci_bus_sem);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_reverse);
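Since the callback visits a bridge's children before the bridge itself, the reverse walk suits teardown-ordered operations. A hedged usage sketch; count_cb() is hypothetical:

static int count_cb(struct pci_dev *pdev, void *data)
{
	int *count = data;

	/* subordinate devices are seen before their upstream bridge */
	(*count)++;
	return 0; /* a non-zero return would stop the walk early */
}

static int count_hierarchy(struct pci_bus *bus)
{
	int count = 0;

	pci_walk_bus_reverse(bus, count_cb, &count);
	return count;
}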
+
void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
lockdep_assert_held(&pci_bus_sem);
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index aae9a8a00406..62be9c8dbc52 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -24,8 +24,6 @@
#include "pci.h"
-#define PCI_DOE_FEATURE_DISCOVERY 0
-
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
diff --git a/drivers/pci/ide.c b/drivers/pci/ide.c
new file mode 100644
index 000000000000..f0ef474e1a0d
--- /dev/null
+++ b/drivers/pci/ide.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+/* PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) */
+
+#define dev_fmt(fmt) "PCI/IDE: " fmt
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci-ide.h>
+#include <linux/pci_regs.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+
+#include "pci.h"
+
+static int __sel_ide_offset(u16 ide_cap, u8 nr_link_ide, u8 stream_index,
+ u8 nr_ide_mem)
+{
+ u32 offset = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ nr_link_ide * PCI_IDE_LINK_BLOCK_SIZE;
+
+ /*
+ * Assume a constant number of address association resources per stream
+ * index
+ */
+ return offset + stream_index * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem);
+}
+
+static int sel_ide_offset(struct pci_dev *pdev,
+ struct pci_ide_partner *settings)
+{
+ return __sel_ide_offset(pdev->ide_cap, pdev->nr_link_ide,
+ settings->stream_index, pdev->nr_ide_mem);
+}
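To make the layout arithmetic concrete, a worked example with illustrative values (not from any real device):

/*
 * Example: ide_cap = 0x400, nr_link_ide = 2, nr_ide_mem = 1.
 * Selective Stream index 3 then starts at:
 *
 *   0x400 + PCI_IDE_LINK_STREAM_0
 *         + 2 * PCI_IDE_LINK_BLOCK_SIZE     (skip the Link IDE blocks)
 *         + 3 * PCI_IDE_SEL_BLOCK_SIZE(1)   (skip earlier Selective blocks)
 *
 * i.e. Link IDE blocks come first, followed by uniformly sized
 * Selective Stream blocks.
 */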
+
+static bool reserve_stream_index(struct pci_dev *pdev, u8 idx)
+{
+ int ret;
+
+ ret = ida_alloc_range(&pdev->ide_stream_ida, idx, idx, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool reserve_stream_id(struct pci_host_bridge *hb, u8 id)
+{
+ int ret;
+
+ ret = ida_alloc_range(&hb->ide_stream_ids_ida, id, id, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool claim_stream(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_dev *pdev, u8 stream_idx)
+{
+ dev_info(&hb->dev, "Stream ID %d active at init\n", stream_id);
+ if (!reserve_stream_id(hb, stream_id)) {
+ dev_info(&hb->dev, "Failed to claim %s Stream ID %d\n",
+ stream_id == PCI_IDE_RESERVED_STREAM_ID ? "reserved" :
+ "active",
+ stream_id);
+ return false;
+ }
+
+ /* No stream index to reserve in the Link IDE case */
+ if (!pdev)
+ return true;
+
+ if (!reserve_stream_index(pdev, stream_idx)) {
+ pci_info(pdev, "Failed to claim active Selective Stream %d\n",
+ stream_idx);
+ return false;
+ }
+
+ return true;
+}
+
+void pci_ide_init(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ u16 nr_link_ide, nr_ide_mem, nr_streams;
+ u16 ide_cap;
+ u32 val;
+
+ /*
+ * Unconditionally init so that ida idle state is consistent with
+ * pdev->ide_cap.
+ */
+ ida_init(&pdev->ide_stream_ida);
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ ide_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_IDE);
+ if (!ide_cap)
+ return;
+
+ pci_read_config_dword(pdev, ide_cap + PCI_IDE_CAP, &val);
+ if ((val & PCI_IDE_CAP_SELECTIVE) == 0)
+ return;
+
+ /*
+ * Require an Endpoint IDE capability to be paired with a Root Port
+ * IDE capability.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (!rp->ide_cap)
+ return;
+ }
+
+ pdev->ide_cfg = FIELD_GET(PCI_IDE_CAP_SEL_CFG, val);
+ pdev->ide_tee_limit = FIELD_GET(PCI_IDE_CAP_TEE_LIMITED, val);
+
+ if (val & PCI_IDE_CAP_LINK)
+ nr_link_ide = 1 + FIELD_GET(PCI_IDE_CAP_LINK_TC_NUM, val);
+ else
+ nr_link_ide = 0;
+
+ nr_ide_mem = 0;
+ nr_streams = 1 + FIELD_GET(PCI_IDE_CAP_SEL_NUM, val);
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+ int nr_assoc;
+ u32 val;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);
+
+ /*
+ * Let's not entertain streams that do not have a constant
+ * number of address association blocks
+ */
+ nr_assoc = FIELD_GET(PCI_IDE_SEL_CAP_ASSOC_NUM, val);
+ if (i && (nr_assoc != nr_ide_mem)) {
+ pci_info(pdev, "Unsupported Selective Stream %d capability, SKIP the rest\n", i);
+ nr_streams = i;
+ break;
+ }
+
+ nr_ide_mem = nr_assoc;
+
+ /*
+ * Claim Stream IDs and Selective Stream blocks that are already
+ * active on the device
+ */
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ id = FIELD_GET(PCI_IDE_SEL_CTL_ID, val);
+ if ((val & PCI_IDE_SEL_CTL_EN) &&
+ !claim_stream(hb, id, pdev, i))
+ return;
+ }
+
+ /* Reserve Link Stream IDs that are already active on the device */
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 + i * PCI_IDE_LINK_BLOCK_SIZE;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
+ id = FIELD_GET(PCI_IDE_LINK_CTL_ID, val);
+ if ((val & PCI_IDE_LINK_CTL_EN) &&
+ !claim_stream(hb, id, NULL, -1))
+ return;
+ }
+
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ if (val & PCI_IDE_SEL_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_SEL_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_SEL_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+ }
+
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ i * PCI_IDE_LINK_BLOCK_SIZE;
+
+ pci_read_config_dword(pdev, pos, &val);
+ if (val & PCI_IDE_LINK_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_LINK_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_LINK_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, val);
+ }
+
+ pdev->ide_cap = ide_cap;
+ pdev->nr_link_ide = nr_link_ide;
+ pdev->nr_sel_ide = nr_streams;
+ pdev->nr_ide_mem = nr_ide_mem;
+}
+
+struct stream_index {
+ struct ida *ida;
+ u8 stream_index;
+};
+
+static void free_stream_index(struct stream_index *stream)
+{
+ ida_free(stream->ida, stream->stream_index);
+}
+
+DEFINE_FREE(free_stream, struct stream_index *, if (_T) free_stream_index(_T))
+static struct stream_index *alloc_stream_index(struct ida *ida, u16 max,
+ struct stream_index *stream)
+{
+ int id;
+
+ if (!max)
+ return NULL;
+
+ id = ida_alloc_max(ida, max - 1, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
+ *stream = (struct stream_index) {
+ .ida = ida,
+ .stream_index = id,
+ };
+ return stream;
+}
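The DEFINE_FREE()/__free()/no_free_ptr() trio used throughout this file implements scope-based cleanup from <linux/cleanup.h>. A minimal self-contained sketch of the pattern, with hypothetical names:

#include <linux/cleanup.h>
#include <linux/slab.h>

DEFINE_FREE(free_buf, void *, if (_T) kfree(_T))

static void *alloc_checked(size_t len)
{
	void *buf __free(free_buf) = kzalloc(len, GFP_KERNEL);

	if (!buf || len < 16)
		return NULL; /* free_buf() runs automatically on this path */

	return no_free_ptr(buf); /* success: suppress cleanup, hand off */
}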
+
+/**
+ * pci_ide_stream_alloc() - Reserve stream indices and probe for settings
+ * @pdev: IDE capable PCIe Endpoint Physical Function
+ *
+ * Retrieve the Requester ID range of @pdev for programming its Root
+ * Port IDE RID Association registers, and conversely retrieve the
+ * Requester ID of the Root Port for programming @pdev's IDE RID
+ * Association registers.
+ *
+ * Allocate a Selective IDE Stream Register Block instance per port.
+ *
+ * Allocate a platform stream resource from the associated host bridge.
+ * Retrieve stream association parameters for Requester ID range and
+ * address range restrictions for the stream.
+ */
+struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
+{
+ /* EP, RP, + HB Stream allocation */
+ struct stream_index __stream[PCI_IDE_HB + 1];
+ struct pci_bus_region pref_assoc = { 0, -1 };
+ struct pci_bus_region mem_assoc = { 0, -1 };
+ struct resource *mem, *pref;
+ struct pci_host_bridge *hb;
+ struct pci_dev *rp, *br;
+ int num_vf, rid_end;
+
+ if (!pci_is_pcie(pdev))
+ return NULL;
+
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
+ return NULL;
+
+ if (!pdev->ide_cap)
+ return NULL;
+
+ struct pci_ide *ide __free(kfree) = kzalloc(sizeof(*ide), GFP_KERNEL);
+ if (!ide)
+ return NULL;
+
+ hb = pci_find_host_bridge(pdev->bus);
+ struct stream_index *hb_stream __free(free_stream) = alloc_stream_index(
+ &hb->ide_stream_ida, hb->nr_ide_streams, &__stream[PCI_IDE_HB]);
+ if (!hb_stream)
+ return NULL;
+
+ rp = pcie_find_root_port(pdev);
+ struct stream_index *rp_stream __free(free_stream) = alloc_stream_index(
+ &rp->ide_stream_ida, rp->nr_sel_ide, &__stream[PCI_IDE_RP]);
+ if (!rp_stream)
+ return NULL;
+
+ struct stream_index *ep_stream __free(free_stream) = alloc_stream_index(
+ &pdev->ide_stream_ida, pdev->nr_sel_ide, &__stream[PCI_IDE_EP]);
+ if (!ep_stream)
+ return NULL;
+
+ /* for SR-IOV case, cover all VFs */
+ num_vf = pci_num_vf(pdev);
+ if (num_vf)
+ rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf),
+ pci_iov_virtfn_devfn(pdev, num_vf));
+ else
+ rid_end = pci_dev_id(pdev);
+
+ br = pci_upstream_bridge(pdev);
+ if (!br)
+ return NULL;
+
+ /*
+ * Check if the device consumes memory and/or prefetch-memory. Setup
+ * downstream address association ranges for each.
+ */
+ mem = pci_resource_n(br, PCI_BRIDGE_MEM_WINDOW);
+ pref = pci_resource_n(br, PCI_BRIDGE_PREF_MEM_WINDOW);
+ if (resource_assigned(mem))
+ pcibios_resource_to_bus(br->bus, &mem_assoc, mem);
+ if (resource_assigned(pref))
+ pcibios_resource_to_bus(br->bus, &pref_assoc, pref);
+
+ *ide = (struct pci_ide) {
+ .pdev = pdev,
+ .partner = {
+ [PCI_IDE_EP] = {
+ .rid_start = pci_dev_id(rp),
+ .rid_end = pci_dev_id(rp),
+ .stream_index = no_free_ptr(ep_stream)->stream_index,
+ /* Disable upstream address association */
+ .mem_assoc = { 0, -1 },
+ .pref_assoc = { 0, -1 },
+ },
+ [PCI_IDE_RP] = {
+ .rid_start = pci_dev_id(pdev),
+ .rid_end = rid_end,
+ .stream_index = no_free_ptr(rp_stream)->stream_index,
+ .mem_assoc = mem_assoc,
+ .pref_assoc = pref_assoc,
+ },
+ },
+ .host_bridge_stream = no_free_ptr(hb_stream)->stream_index,
+ .stream_id = -1,
+ };
+
+ return_ptr(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_alloc);
+
+/**
+ * pci_ide_stream_free() - unwind pci_ide_stream_alloc()
+ * @ide: idle IDE settings descriptor
+ *
+ * Free all of the stream index (register block) allocations acquired by
+ * pci_ide_stream_alloc(). The stream represented by @ide is assumed to
+ * be unregistered and not instantiated in any device.
+ */
+void pci_ide_stream_free(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ ida_free(&pdev->ide_stream_ida, ide->partner[PCI_IDE_EP].stream_index);
+ ida_free(&rp->ide_stream_ida, ide->partner[PCI_IDE_RP].stream_index);
+ ida_free(&hb->ide_stream_ida, ide->host_bridge_stream);
+ kfree(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_free);
+
+/**
+ * pci_ide_stream_release() - unwind and release an @ide context
+ * @ide: partially or fully registered IDE settings descriptor
+ *
+ * In support of automatic cleanup of IDE setup routines, perform IDE
+ * teardown in the reverse order of setup, unwinding only those aspects
+ * of IDE setup that successfully completed.
+ *
+ * Take care that setup order mirrors this shutdown order; otherwise,
+ * open code the release of the IDE context.
+ */
+void pci_ide_stream_release(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (ide->partner[PCI_IDE_RP].enable)
+ pci_ide_stream_disable(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].enable)
+ pci_ide_stream_disable(pdev, ide);
+
+ if (ide->tsm_dev)
+ tsm_ide_stream_unregister(ide);
+
+ if (ide->partner[PCI_IDE_RP].setup)
+ pci_ide_stream_teardown(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].setup)
+ pci_ide_stream_teardown(pdev, ide);
+
+ if (ide->name)
+ pci_ide_stream_unregister(ide);
+
+ pci_ide_stream_free(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_release);
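Putting the helpers together, a hedged sketch of the call sequence a TSM driver might follow; the stream_id value and the Root Port-before-Endpoint enable order are illustrative assumptions, not requirements stated by this file:

static int establish_stream(struct pci_dev *pdev)
{
	struct pci_dev *rp = pcie_find_root_port(pdev);
	struct pci_ide *ide = pci_ide_stream_alloc(pdev);
	int rc;

	if (!ide)
		return -ENXIO;

	ide->stream_id = 1; /* illustrative; negotiated by the TSM in practice */
	rc = pci_ide_stream_register(ide);
	if (rc) {
		pci_ide_stream_free(ide);
		return rc;
	}

	pci_ide_stream_setup(rp, ide);
	pci_ide_stream_setup(pdev, ide);

	/* ... IDE key programming via the TSM would happen here ... */

	rc = pci_ide_stream_enable(rp, ide);
	if (rc == 0)
		rc = pci_ide_stream_enable(pdev, ide);
	if (rc)
		pci_ide_stream_release(ide); /* unwinds whatever completed */
	return rc;
}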
+
+struct pci_ide_stream_id {
+ struct pci_host_bridge *hb;
+ u8 stream_id;
+};
+
+static struct pci_ide_stream_id *
+request_stream_id(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_ide_stream_id *sid)
+{
+ if (!reserve_stream_id(hb, stream_id))
+ return NULL;
+
+ *sid = (struct pci_ide_stream_id) {
+ .hb = hb,
+ .stream_id = stream_id,
+ };
+
+ return sid;
+}
+DEFINE_FREE(free_stream_id, struct pci_ide_stream_id *,
+ if (_T) ida_free(&_T->hb->ide_stream_ids_ida, _T->stream_id))
+
+/**
+ * pci_ide_stream_register() - Prepare to activate an IDE Stream
+ * @ide: IDE settings descriptor
+ *
+ * After a Stream ID has been acquired for @ide, record the presence of
+ * the stream in sysfs. The expectation is that @ide is immutable while
+ * registered.
+ */
+int pci_ide_stream_register(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ struct pci_ide_stream_id __sid;
+ u8 ep_stream, rp_stream;
+ int rc;
+
+ if (ide->stream_id < 0 || ide->stream_id > U8_MAX) {
+ pci_err(pdev, "Setup fail: Invalid Stream ID: %d\n", ide->stream_id);
+ return -ENXIO;
+ }
+
+ struct pci_ide_stream_id *sid __free(free_stream_id) =
+ request_stream_id(hb, ide->stream_id, &__sid);
+ if (!sid) {
+ pci_err(pdev, "Setup fail: Stream ID %d in use\n", ide->stream_id);
+ return -EBUSY;
+ }
+
+ ep_stream = ide->partner[PCI_IDE_EP].stream_index;
+ rp_stream = ide->partner[PCI_IDE_RP].stream_index;
+ const char *name __free(kfree) = kasprintf(GFP_KERNEL, "stream%d.%d.%d",
+ ide->host_bridge_stream,
+ rp_stream, ep_stream);
+ if (!name)
+ return -ENOMEM;
+
+ rc = sysfs_create_link(&hb->dev.kobj, &pdev->dev.kobj, name);
+ if (rc)
+ return rc;
+
+ ide->name = no_free_ptr(name);
+
+ /* Stream ID reservation recorded in @ide is now successfully registered */
+ retain_and_null_ptr(sid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_register);
+
+/**
+ * pci_ide_stream_unregister() - unwind pci_ide_stream_register()
+ * @ide: idle IDE settings descriptor
+ *
+ * In preparation for freeing @ide, remove sysfs enumeration for the
+ * stream.
+ */
+void pci_ide_stream_unregister(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ sysfs_remove_link(&hb->dev.kobj, ide->name);
+ kfree(ide->name);
+ ida_free(&hb->ide_stream_ids_ida, ide->stream_id);
+ ide->name = NULL;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_unregister);
+
+static int pci_ide_domain(struct pci_dev *pdev)
+{
+ if (pdev->fm_enabled)
+ return pci_domain_nr(pdev->bus);
+ return 0;
+}
+
+struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ if (!pci_is_pcie(pdev)) {
+ pci_warn_once(pdev, "not a PCIe device\n");
+ return NULL;
+ }
+
+ switch (pci_pcie_type(pdev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ if (pdev != ide->pdev) {
+ pci_warn_once(pdev, "setup expected Endpoint: %s\n", pci_name(ide->pdev));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_EP];
+ case PCI_EXP_TYPE_ROOT_PORT: {
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ if (pdev != rp) {
+ pci_warn_once(pdev, "setup expected Root Port: %s\n",
+ pci_name(rp));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_RP];
+ }
+ default:
+ pci_warn_once(pdev, "invalid device type\n");
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(pci_ide_to_settings);
+
+static void set_ide_sel_ctl(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_partner *settings, int pos,
+ bool enable)
+{
+ u32 val = FIELD_PREP(PCI_IDE_SEL_CTL_ID, ide->stream_id) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_DEFAULT, settings->default_stream) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_CFG_EN, pdev->ide_cfg) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_TEE_LIMITED, pdev->ide_tee_limit) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_EN, enable);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+}
+
+#define SEL_ADDR1_LOWER GENMASK(31, 20)
+#define SEL_ADDR_UPPER GENMASK_ULL(63, 32)
+#define PREP_PCI_IDE_SEL_ADDR1(base, limit) \
+ (FIELD_PREP(PCI_IDE_SEL_ADDR_1_VALID, 1) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_BASE_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (base))) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_LIMIT_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (limit))))
+
+static void mem_assoc_to_regs(struct pci_bus_region *region,
+ struct pci_ide_regs *regs, int idx)
+{
+ /* convert to u64 range for bitfield size checks */
+ struct range r = { region->start, region->end };
+
+ regs->addr[idx].assoc1 = PREP_PCI_IDE_SEL_ADDR1(r.start, r.end);
+ regs->addr[idx].assoc2 = FIELD_GET(SEL_ADDR_UPPER, r.end);
+ regs->addr[idx].assoc3 = FIELD_GET(SEL_ADDR_UPPER, r.start);
+}
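A worked example of the resulting encoding, with illustrative addresses:

/*
 * Example: a bus region 0x80400000..0xbfffffff produces
 *
 *   assoc1 = VALID | base[31:20] (0x804) | limit[31:20] (0xbff)
 *   assoc2 = limit[63:32]        (0x0)
 *   assoc3 = base[63:32]         (0x0)
 *
 * Bits 19:0 of base and limit are not encoded; the hardware treats
 * them as 0x00000 and 0xfffff respectively (1MB granularity).
 */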
+
+/**
+ * pci_ide_stream_to_regs() - convert IDE settings to association register values
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ * @regs: output register values
+ */
+static void pci_ide_stream_to_regs(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_regs *regs)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int assoc_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (!settings)
+ return;
+
+ regs->rid1 = FIELD_PREP(PCI_IDE_SEL_RID_1_LIMIT, settings->rid_end);
+
+ regs->rid2 = FIELD_PREP(PCI_IDE_SEL_RID_2_VALID, 1) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_BASE, settings->rid_start) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_SEG, pci_ide_domain(pdev));
+
+ if (pdev->nr_ide_mem && pci_bus_region_size(&settings->mem_assoc)) {
+ mem_assoc_to_regs(&settings->mem_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ if (pdev->nr_ide_mem > assoc_idx &&
+ pci_bus_region_size(&settings->pref_assoc)) {
+ mem_assoc_to_regs(&settings->pref_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ regs->nr_addr = assoc_idx;
+}
+
+/**
+ * pci_ide_stream_setup() - program settings to Selective IDE Stream registers
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * When @pdev is a PCI_EXP_TYPE_ENDPOINT then the PCI_IDE_EP partner
+ * settings are written to @pdev's Selective IDE Stream register block,
+ * and when @pdev is a PCI_EXP_TYPE_ROOT_PORT, the PCI_IDE_RP settings
+ * are selected.
+ */
+void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ struct pci_ide_regs regs;
+ int pos;
+
+ if (!settings)
+ return;
+
+ pci_ide_stream_to_regs(pdev, ide, &regs);
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, regs.rid1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, regs.rid2);
+
+ for (int i = 0; i < regs.nr_addr; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i),
+ regs.addr[i].assoc1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i),
+ regs.addr[i].assoc2);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i),
+ regs.addr[i].assoc3);
+ }
+
+ /* clear extra unused address association blocks */
+ for (int i = regs.nr_addr; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ /*
+ * Set up the control register early for devices that expect the
+ * Stream ID to already be set during key programming.
+ */
+ set_ide_sel_ctl(pdev, ide, settings, pos, false);
+ settings->setup = 1;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_setup);
+
+/**
+ * pci_ide_stream_teardown() - disable the stream and clear all settings
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * For stream destruction, zero all registers that may have been written
+ * by pci_ide_stream_setup(). Consider pci_ide_stream_disable() to leave
+ * settings in place while temporarily disabling the stream.
+ */
+void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos, i;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+
+ for (i = 0; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, 0);
+ settings->setup = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_teardown);
+
+/**
+ * pci_ide_stream_enable() - enable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Activate the stream by writing to the Selective IDE Stream Control
+ * Register.
+ *
+ * Return: 0 if the stream successfully entered the "secure" state, -EINVAL
+ * if @ide is invalid, or -ENXIO if the stream fails to enter the secure state.
+ *
+ * Note that the state may go "insecure" at any point after returning 0, but
+ * those events are equivalent to a "link down" event and handled via
+ * asynchronous error reporting.
+ *
+ * Caller is responsible to clear the enable bit in the -ENXIO case.
+ */
+int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+ u32 val;
+
+ if (!settings)
+ return -EINVAL;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ set_ide_sel_ctl(pdev, ide, settings, pos, true);
+ settings->enable = 1;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_STS, &val);
+ if (FIELD_GET(PCI_IDE_SEL_STS_STATE, val) !=
+ PCI_IDE_SEL_STS_STATE_SECURE)
+ return -ENXIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_enable);
+
+/**
+ * pci_ide_stream_disable() - disable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Clear the Selective IDE Stream Control Register, but leave all other
+ * registers untouched.
+ */
+void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+ settings->enable = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_disable);
+
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb)
+{
+ hb->nr_ide_streams = 256;
+ ida_init(&hb->ide_stream_ida);
+ ida_init(&hb->ide_stream_ids_ida);
+ reserve_stream_id(hb, PCI_IDE_RESERVED_STREAM_ID);
+}
+
+static ssize_t available_secure_streams_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+ int nr = READ_ONCE(hb->nr_ide_streams);
+ int avail = nr;
+
+ if (!nr)
+ return -ENXIO;
+
+ /*
+ * Yes, this is inefficient and racy, but it is only for occasional
+ * platform resource surveys. Worst case is bounded to 256 streams.
+ */
+ for (int i = 0; i < nr; i++)
+ if (ida_exists(&hb->ide_stream_ida, i))
+ avail--;
+ return sysfs_emit(buf, "%d\n", avail);
+}
+static DEVICE_ATTR_RO(available_secure_streams);
+
+static struct attribute *pci_ide_attrs[] = {
+ &dev_attr_available_secure_streams.attr,
+ NULL
+};
+
+static umode_t pci_ide_attr_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+
+ if (a == &dev_attr_available_secure_streams.attr)
+ if (!hb->nr_ide_streams)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group pci_ide_attr_group = {
+ .attrs = pci_ide_attrs,
+ .is_visible = pci_ide_attr_visible,
+};
+
+/**
+ * pci_ide_set_nr_streams() - set the size of the pool of IDE Stream resources
+ * @hb: host bridge boundary for the stream pool
+ * @nr: number of streams
+ *
+ * Platform PCI init and/or expert test module use only. Limit IDE
+ * Stream establishment by setting the number of stream resources
+ * available at the host bridge. Platform init code must set this before
+ * the first pci_ide_stream_alloc() call if the platform supports fewer
+ * than the default of 256 streams per host bridge.
+ *
+ * The "PCI_IDE" symbol namespace is required because this is typically
+ * a detail that is settled in early PCI init. I.e. this export is not
+ * for endpoint drivers.
+ */
+void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr)
+{
+ hb->nr_ide_streams = min(nr, 256);
+ WARN_ON_ONCE(!ida_is_empty(&hb->ide_stream_ida));
+ sysfs_update_group(&hb->dev.kobj, &pci_ide_attr_group);
+}
+EXPORT_SYMBOL_NS_GPL(pci_ide_set_nr_streams, "PCI_IDE");
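A hedged sketch of the expected caller, e.g. platform PCI init code; the stream count is illustrative, and the namespace import mirrors the export above:

#include <linux/pci.h>

MODULE_IMPORT_NS("PCI_IDE");

static void fixup_host_bridge_streams(struct pci_host_bridge *hb)
{
	/* this hypothetical root complex implements only 4 stream slots */
	pci_ide_set_nr_streams(hb, 4);
}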
+
+void pci_ide_destroy(struct pci_dev *pdev)
+{
+ ida_destroy(&pdev->ide_stream_ida);
+}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 80a7c4fe6b03..c2df915ad2d2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1856,5 +1856,9 @@ const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCI_DOE
&pci_doe_sysfs_group,
#endif
+#ifdef CONFIG_PCI_TSM
+ &pci_tsm_auth_attr_group,
+ &pci_tsm_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a33bc4e0bf34..0e67014aa001 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -615,6 +615,27 @@ static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
#endif
+#ifdef CONFIG_PCI_IDE
+void pci_ide_init(struct pci_dev *dev);
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb);
+void pci_ide_destroy(struct pci_dev *dev);
+extern const struct attribute_group pci_ide_attr_group;
+#else
+static inline void pci_ide_init(struct pci_dev *dev) { }
+static inline void pci_ide_init_host_bridge(struct pci_host_bridge *hb) { }
+static inline void pci_ide_destroy(struct pci_dev *dev) { }
+#endif
+
+#ifdef CONFIG_PCI_TSM
+void pci_tsm_init(struct pci_dev *pdev);
+void pci_tsm_destroy(struct pci_dev *pdev);
+extern const struct attribute_group pci_tsm_attr_group;
+extern const struct attribute_group pci_tsm_auth_attr_group;
+#else
+static inline void pci_tsm_init(struct pci_dev *pdev) { }
+static inline void pci_tsm_destroy(struct pci_dev *pdev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 0b5ed4722ac3..e0bcaa896803 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -30,6 +30,7 @@
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
+#include <linux/vmcore_info.h>
#include <acpi/apei.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
@@ -765,6 +766,7 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
break;
case AER_NONFATAL:
aer_info->dev_total_nonfatal_errs++;
+ hwerr_log_error_type(HWERR_RECOV_PCI);
counter = &aer_info->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 124d2d309c58..41183aed8f5d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -658,6 +658,18 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(bridge);
}
+static const struct attribute_group *pci_host_bridge_groups[] = {
+#ifdef CONFIG_PCI_IDE
+ &pci_ide_attr_group,
+#endif
+ NULL
+};
+
+static const struct device_type pci_host_bridge_type = {
+ .groups = pci_host_bridge_groups,
+ .release = pci_release_host_bridge_dev,
+};
+
static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
INIT_LIST_HEAD(&bridge->windows);
@@ -677,6 +689,8 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_dpc = 1;
bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
bridge->native_cxl_error = 1;
+ bridge->dev.type = &pci_host_bridge_type;
+ pci_ide_init_host_bridge(bridge);
device_initialize(&bridge->dev);
}
@@ -690,7 +704,6 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
return NULL;
pci_init_host_bridge(bridge);
- bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
@@ -2296,6 +2309,17 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
return 0;
}
+static void pci_dev3_init(struct pci_dev *pdev)
+{
+ u16 cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DEV3);
+ u32 val = 0;
+
+ if (!cap)
+ return;
+ pci_read_config_dword(pdev, cap + PCI_DEV3_STA, &val);
+ pdev->fm_enabled = !!(val & PCI_DEV3_STA_SEGMENT);
+}
+
/**
* pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
* @dev: PCI device to query
@@ -2680,6 +2704,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_doe_init(dev); /* Data Object Exchange */
pci_tph_init(dev); /* TLP Processing Hints */
pci_rebar_init(dev); /* Resizable BAR */
+ pci_dev3_init(dev); /* Device 3 capabilities */
+ pci_ide_init(dev); /* Link Integrity and Data Encryption */
pcie_report_downtraining(dev);
pci_init_reset_methods(dev);
@@ -2773,6 +2799,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
+ /* Establish pdev->tsm for newly added (e.g. new SR-IOV VFs) */
+ pci_tsm_init(dev);
+
pci_npem_create(dev);
pci_doe_sysfs_init(dev);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index ce5c25adef55..417a9ea59117 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -57,6 +57,12 @@ static void pci_destroy_dev(struct pci_dev *dev)
pci_doe_sysfs_teardown(dev);
pci_npem_remove(dev);
+ /*
+ * While device is in D0 drop the device from TSM link operations
+ * including unbind and disconnect (IDE + SPDM teardown).
+ */
+ pci_tsm_destroy(dev);
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -64,6 +70,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
up_write(&pci_bus_sem);
pci_doe_destroy(dev);
+ pci_ide_destroy(dev);
pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
pci_pwrctrl_unregister(&dev->dev);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 53840634fbfc..e6e84dc62e82 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -282,6 +282,45 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
return pdev;
}
+static struct pci_dev *pci_get_dev_by_id_reverse(const struct pci_device_id *id,
+ struct pci_dev *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct pci_dev *pdev = NULL;
+
+ if (from)
+ dev_start = &from->dev;
+ dev = bus_find_device_reverse(&pci_bus_type, dev_start, (void *)id,
+ match_pci_dev_by_id);
+ if (dev)
+ pdev = to_pci_dev(dev);
+ pci_dev_put(from);
+ return pdev;
+}
+
+enum pci_search_direction {
+ PCI_SEARCH_FORWARD,
+ PCI_SEARCH_REVERSE,
+};
+
+static struct pci_dev *__pci_get_subsys(unsigned int vendor, unsigned int device,
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from, enum pci_search_direction dir)
+{
+ struct pci_device_id id = {
+ .vendor = vendor,
+ .device = device,
+ .subvendor = ss_vendor,
+ .subdevice = ss_device,
+ };
+
+ if (dir == PCI_SEARCH_FORWARD)
+ return pci_get_dev_by_id(&id, from);
+ else
+ return pci_get_dev_by_id_reverse(&id, from);
+}
+
/**
* pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
@@ -302,14 +341,8 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from)
{
- struct pci_device_id id = {
- .vendor = vendor,
- .device = device,
- .subvendor = ss_vendor,
- .subdevice = ss_device,
- };
-
- return pci_get_dev_by_id(&id, from);
+ return __pci_get_subsys(vendor, device, ss_vendor, ss_device, from,
+ PCI_SEARCH_FORWARD);
}
EXPORT_SYMBOL(pci_get_subsys);
@@ -334,6 +367,19 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
}
EXPORT_SYMBOL(pci_get_device);
+/*
+ * Same semantics as pci_get_device(), except walks the PCI device list
+ * in reverse discovery order.
+ */
+struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *from)
+{
+ return __pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from,
+ PCI_SEARCH_REVERSE);
+}
+EXPORT_SYMBOL(pci_get_device_reverse);
+
/**
* pci_get_class - begin or continue searching for a PCI device by class
* @class: search for a PCI device with this class designation
diff --git a/drivers/pci/tsm.c b/drivers/pci/tsm.c
new file mode 100644
index 000000000000..5fdcd7f2e820
--- /dev/null
+++ b/drivers/pci/tsm.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interface with platform TEE Security Manager (TSM) objects as defined by
+ * PCIe r7.0 section 11 TEE Device Interface Security Protocol (TDISP)
+ *
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
+ */
+
+#define dev_fmt(fmt) "PCI/TSM: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/pci-tsm.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+#include <linux/xarray.h>
+#include "pci.h"
+
+/*
+ * Provide a read/write lock against the init / exit of pdev tsm
+ * capabilities and arrival/departure of a TSM instance
+ */
+static DECLARE_RWSEM(pci_tsm_rwsem);
+
+/*
+ * Count of TSMs registered that support physical link operations vs device
+ * security state management.
+ */
+static int pci_tsm_link_count;
+static int pci_tsm_devsec_count;
+
+static const struct pci_tsm_ops *to_pci_tsm_ops(struct pci_tsm *tsm)
+{
+ return tsm->tsm_dev->pci_ops;
+}
+
+static inline bool is_dsm(struct pci_dev *pdev)
+{
+ return pdev->tsm && pdev->tsm->dsm_dev == pdev;
+}
+
+static inline bool has_tee(struct pci_dev *pdev)
+{
+ return pdev->devcap & PCI_EXP_DEVCAP_TEE;
+}
+
+/* 'struct pci_tsm_pf0' wraps 'struct pci_tsm' when ->dsm_dev == ->pdev (self) */
+static struct pci_tsm_pf0 *to_pci_tsm_pf0(struct pci_tsm *tsm)
+{
+ /*
+ * All "link" TSM contexts reference the device that hosts the DSM
+ * interface for a set of devices. Walk to the DSM device and cast its
+ * ->tsm context to a 'struct pci_tsm_pf0 *'.
+ */
+ struct pci_dev *pf0 = tsm->dsm_dev;
+
+ if (!is_pci_tsm_pf0(pf0) || !is_dsm(pf0)) {
+ pci_WARN_ONCE(tsm->pdev, 1, "invalid context object\n");
+ return NULL;
+ }
+
+ return container_of(pf0->tsm, struct pci_tsm_pf0, base_tsm);
+}
+
+static void tsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev;
+
+ if (!tsm)
+ return;
+
+ pdev = tsm->pdev;
+ to_pci_tsm_ops(tsm)->remove(tsm);
+ pdev->tsm = NULL;
+}
+DEFINE_FREE(tsm_remove, struct pci_tsm *, if (_T) tsm_remove(_T))
+
+static void pci_tsm_walk_fns(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev, void *data),
+ void *data)
+{
+ /* Walk subordinate physical functions */
+ for (int i = 0; i < 8; i++) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* on entry function 0 has already run @cb */
+ if (i > 0)
+ cb(pf, data);
+
+ /* walk virtual functions of each pf */
+ for (int j = 0; j < pci_num_vf(pf); j++) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+
+ cb(vf, data);
+ }
+ }
+
+ /*
+ * Walk downstream devices, assuming that an upstream DSM is
+ * limited to downstream physical functions
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus(pdev->subordinate, cb, data);
+}
+
+static void pci_tsm_walk_fns_reverse(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev,
+ void *data),
+ void *data)
+{
+ /* Reverse walk downstream devices */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus_reverse(pdev->subordinate, cb, data);
+
+ /* Reverse walk subordinate physical functions */
+ for (int i = 7; i >= 0; i--) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* reverse walk virtual functions */
+ for (int j = pci_num_vf(pf) - 1; j >= 0; j--) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+ cb(vf, data);
+ }
+
+ /* on exit, caller will run @cb on function 0 */
+ if (i > 0)
+ cb(pf, data);
+ }
+}
+
+static void link_sysfs_disable(struct pci_dev *pdev)
+{
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static void link_sysfs_enable(struct pci_dev *pdev)
+{
+ bool tee = has_tee(pdev);
+
+ pci_dbg(pdev, "%s Security Manager detected (%s%s%s)\n",
+ pdev->tsm ? "Device" : "Platform TEE",
+ pdev->ide_cap ? "IDE" : "", pdev->ide_cap && tee ? " " : "",
+ tee ? "TEE" : "");
+
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static int probe_fn(struct pci_dev *pdev, void *dsm)
+{
+ struct pci_dev *dsm_dev = dsm;
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(dsm_dev->tsm);
+
+ pdev->tsm = ops->probe(dsm_dev->tsm->tsm_dev, pdev);
+ pci_dbg(pdev, "setup TSM context: DSM: %s status: %s\n",
+ pci_name(dsm_dev), pdev->tsm ? "success" : "failed");
+ if (pdev->tsm)
+ link_sysfs_enable(pdev);
+ return 0;
+}
+
+static int pci_tsm_connect(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ int rc;
+ struct pci_tsm_pf0 *tsm_pf0;
+ const struct pci_tsm_ops *ops = tsm_dev->pci_ops;
+ struct pci_tsm *pci_tsm __free(tsm_remove) = ops->probe(tsm_dev, pdev);
+
+ /* connect() mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ if (!pci_tsm)
+ return -ENXIO;
+
+ pdev->tsm = pci_tsm;
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+
+ /* mutex_intr assumes connect() is always sysfs/user driven */
+ ACQUIRE(mutex_intr, lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
+ return rc;
+
+ rc = ops->connect(pdev);
+ if (rc)
+ return rc;
+
+ pdev->tsm = no_free_ptr(pci_tsm);
+
+ /*
+ * Now that the DSM is established, probe() all the potential
+ * dependent functions. Failure to probe a function is not fatal
+ * to connect(); it just disables subsequent security operations
+ * for that function.
+ *
+ * Note this is done unconditionally, without regard to finding
+ * PCI_EXP_DEVCAP_TEE on the dependent function, for robustness. The DSM
+ * is the ultimate arbiter of security state relative to a given
+ * interface id, and if it says it can manage TDISP state of a function,
+ * let it.
+ */
+ if (has_tee(pdev))
+ pci_tsm_walk_fns(pdev, probe_fn, pdev);
+ return 0;
+}
+
+static ssize_t connect_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return sysfs_emit(buf, "\n");
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm_dev->dev));
+}
+
+/* Is @tsm_dev managing physical link / session properties... */
+static bool is_link_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->link_ops.probe;
+}
+
+/* ...or is @tsm_dev managing device security state? */
+static bool is_devsec_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->devsec_ops.lock;
+}
+
+static ssize_t connect_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int rc, id;
+
+ rc = sscanf(buf, "tsm%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (pdev->tsm)
+ return -EBUSY;
+
+ struct tsm_dev *tsm_dev __free(put_tsm_dev) = find_tsm_dev(id);
+ if (!is_link_tsm(tsm_dev))
+ return -ENXIO;
+
+ rc = pci_tsm_connect(pdev, tsm_dev);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(connect);
+
+static int remove_fn(struct pci_dev *pdev, void *data)
+{
+ tsm_remove(pdev->tsm);
+ link_sysfs_disable(pdev);
+ return 0;
+}
+
+/*
+ * Note, this helper returns an int and takes a @data argument only for
+ * compatibility with the pci_walk_bus() callback prototype. pci_tsm_unbind()
+ * always succeeds.
+ */
+static int __pci_tsm_unbind(struct pci_dev *pdev, void *data)
+{
+ struct pci_tdi *tdi;
+ struct pci_tsm_pf0 *tsm_pf0;
+
+ lockdep_assert_held(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return 0;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return 0;
+
+ to_pci_tsm_ops(pdev->tsm)->unbind(tdi);
+ pdev->tsm->tdi = NULL;
+
+ return 0;
+}
+
+void pci_tsm_unbind(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+ __pci_tsm_unbind(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_unbind);
+
+/**
+ * pci_tsm_bind() - Bind @pdev as a TDI for @kvm
+ * @pdev: PCI device function to bind
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ *
+ * Context: Caller is responsible for constraining the bind lifetime to the
+ * registered state of the device. For example, pci_tsm_bind() /
+ * pci_tsm_unbind() may be constrained to the VFIO driver-bound state of
+ * the device.
+ */
+int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+
+ if (!kvm)
+ return -EINVAL;
+
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return -EINVAL;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ /* Resolve races to bind a TDI */
+ if (pdev->tsm->tdi) {
+ if (pdev->tsm->tdi->kvm != kvm)
+ return -EBUSY;
+ return 0;
+ }
+
+ tdi = to_pci_tsm_ops(pdev->tsm)->bind(pdev, kvm, tdi_id);
+ if (IS_ERR(tdi))
+ return PTR_ERR(tdi);
+
+ pdev->tsm->tdi = tdi;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_bind);
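A hedged sketch of the bind/unbind contract from a hypothetical VFIO-style caller; the tdi_id encoding is illustrative:

static int tdi_attach(struct pci_dev *pdev, struct kvm *kvm)
{
	/* guest-visible (virtual) BDF for the TDI; encoding is illustrative */
	u32 tdi_id = PCI_DEVID(0, PCI_DEVFN(2, 0));

	return pci_tsm_bind(pdev, kvm, tdi_id);
}

static void tdi_detach(struct pci_dev *pdev)
{
	/* safe to call regardless of whether a TDI is currently bound */
	pci_tsm_unbind(pdev);
}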
+
+/**
+ * pci_tsm_guest_req() - helper to marshal guest requests to the TSM driver
+ * @pdev: PCI device function representing a bound TDI
+ * @scope: caller asserts this passthrough request is limited to TDISP operations
+ * @req_in: Input payload forwarded from the guest
+ * @in_len: Length of @req_in
+ * @req_out: Output payload buffer for the response to the guest
+ * @out_len: Length of @req_out on input, bytes filled in @req_out on output
+ * @tsm_code: Optional TSM arch specific result code for the guest TSM
+ *
+ * This is a common entry point for requests triggered by userspace KVM-exit
+ * service handlers responding to TDI information or state change requests. The
+ * scope parameter limits requests to TDISP state management, or limited debug.
+ * This path is only suitable for commands and results for which the host
+ * kernel has no use; the host is only facilitating guest-to-TSM
+ * communication.
+ *
+ * Returns 0 on success, -error on failure, or a positive "residue" on success
+ * when @req_out is filled with less than @out_len, or when @req_out is NULL
+ * and a residue number of bytes were not consumed from @req_in. On success or
+ * failure @tsm_code may be populated with a TSM implementation specific result
+ * code for the guest to consume.
+ *
+ * Context: Caller is responsible for calling this within the pci_tsm_bind()
+ * state of the TDI.
+ */
+ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len, sockptr_t req_out,
+ size_t out_len, u64 *tsm_code)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+ int rc;
+
+ /* Forbid requests that are not directly related to TDISP operations */
+ if (scope > PCI_TSM_REQ_STATE_CHANGE)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return -ENXIO;
+ return to_pci_tsm_ops(pdev->tsm)->guest_req(tdi, scope, req_in, in_len,
+ req_out, out_len, tsm_code);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_guest_req);
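+
+/*
+ * Example: a minimal sketch of a KVM-exit service handler forwarding a guest
+ * TDISP request through the helper above. The wrapper name and user pointer
+ * plumbing are hypothetical.
+ */
+static ssize_t example_forward_guest_req(struct pci_dev *pdev,
+					 void __user *in, size_t in_len,
+					 void __user *out, size_t out_len,
+					 u64 *tsm_code)
+{
+	/* scope asserts this passthrough is limited to TDISP state management */
+	return pci_tsm_guest_req(pdev, PCI_TSM_REQ_STATE_CHANGE,
+				 USER_SOCKPTR(in), in_len,
+				 USER_SOCKPTR(out), out_len, tsm_code);
+}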
+
+static void pci_tsm_unbind_all(struct pci_dev *pdev)
+{
+ pci_tsm_walk_fns_reverse(pdev, __pci_tsm_unbind, NULL);
+ __pci_tsm_unbind(pdev, NULL);
+}
+
+static void __pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ struct pci_tsm_pf0 *tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(pdev->tsm);
+
+ /* disconnect() is mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ pci_tsm_unbind_all(pdev);
+
+ /*
+ * disconnect() is uninterruptible as it may be called for device
+ * teardown
+ */
+ guard(mutex)(&tsm_pf0->lock);
+ pci_tsm_walk_fns_reverse(pdev, remove_fn, NULL);
+ ops->disconnect(pdev);
+}
+
+static void pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ __pci_tsm_disconnect(pdev);
+ tsm_remove(pdev->tsm);
+}
+
+static ssize_t disconnect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ if (!sysfs_streq(buf, dev_name(&tsm_dev->dev)))
+ return -EINVAL;
+
+ pci_tsm_disconnect(pdev);
+ return len;
+}
+static DEVICE_ATTR_WO(disconnect);
+
+static ssize_t bound_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+ tsm_pf0 = to_pci_tsm_pf0(tsm);
+
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ if (!tsm->tdi)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm->tsm_dev->dev));
+}
+static DEVICE_ATTR_RO(bound);
+
+static ssize_t dsm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+
+ return sysfs_emit(buf, "%s\n", pci_name(tsm->dsm_dev));
+}
+static DEVICE_ATTR_RO(dsm);
+
+/* The 'authenticated' attribute is exclusive to the presence of a 'link' TSM */
+static bool pci_tsm_link_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (!pci_tsm_link_count)
+ return false;
+
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ if (is_pci_tsm_pf0(pdev))
+ return true;
+
+ /*
+ * Show 'authenticated' and other attributes for the managed
+ * sub-functions of a DSM.
+ */
+ if (pdev->tsm)
+ return true;
+
+ return false;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_tsm_link);
+
+/*
+ * 'link' and 'devsec' TSMs share the same 'tsm/' sysfs group, so the TSM type
+ * specific attributes need individual visibility checks.
+ */
+static umode_t pci_tsm_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (pci_tsm_link_group_visible(kobj)) {
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (attr == &dev_attr_bound.attr) {
+ if (is_pci_tsm_pf0(pdev) && has_tee(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_dsm.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_connect.attr ||
+ attr == &dev_attr_disconnect.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ }
+ }
+
+ return 0;
+}
+
+static bool pci_tsm_group_visible(struct kobject *kobj)
+{
+ return pci_tsm_link_group_visible(kobj);
+}
+DEFINE_SYSFS_GROUP_VISIBLE(pci_tsm);
+
+static struct attribute *pci_tsm_attrs[] = {
+ &dev_attr_connect.attr,
+ &dev_attr_disconnect.attr,
+ &dev_attr_bound.attr,
+ &dev_attr_dsm.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_attr_group = {
+ .name = "tsm",
+ .attrs = pci_tsm_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm),
+};
+
+static ssize_t authenticated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ /*
+	 * When the SPDM session is established via the TSM, the
+	 * 'authenticated' state of the device is identical to the connect
+	 * state.
+ */
+ return connect_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RO(authenticated);
+
+static struct attribute *pci_tsm_auth_attrs[] = {
+ &dev_attr_authenticated.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_auth_attr_group = {
+ .attrs = pci_tsm_auth_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm_link),
+};
+
+/*
+ * Retrieve the physical function 0 device whether or not it has TEE capability
+ */
+static struct pci_dev *pf0_dev_get(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_dev = pci_physfn(pdev);
+
+ if (PCI_FUNC(pf_dev->devfn) == 0)
+ return pci_dev_get(pf_dev);
+
+ return pci_get_slot(pf_dev->bus,
+ pf_dev->devfn - PCI_FUNC(pf_dev->devfn));
+}
+
+/*
+ * Find the PCI Device instance that serves as the Device Security Manager (DSM)
+ * for @pdev. Note that no additional reference is held for the resulting device
+ * because that resulting object always has a registered lifetime
+ * greater-than-or-equal to that of the @pdev argument. This is by virtue of
+ * @pdev being a descendant of, or identical to, the returned DSM device.
+ */
+static struct pci_dev *find_dsm_dev(struct pci_dev *pdev)
+{
+ struct device *grandparent;
+ struct pci_dev *uport;
+
+ if (is_pci_tsm_pf0(pdev))
+ return pdev;
+
+ struct pci_dev *pf0 __free(pci_dev_put) = pf0_dev_get(pdev);
+ if (!pf0)
+ return NULL;
+
+ if (is_dsm(pf0))
+ return pf0;
+
+ /*
+ * For cases where a switch may be hosting TDISP services on behalf of
+ * downstream devices, check the first upstream port relative to this
+ * endpoint.
+ */
+ if (!pdev->dev.parent)
+ return NULL;
+ grandparent = pdev->dev.parent->parent;
+ if (!grandparent)
+ return NULL;
+ if (!dev_is_pci(grandparent))
+ return NULL;
+ uport = to_pci_dev(grandparent);
+ if (!pci_is_pcie(uport) ||
+ pci_pcie_type(uport) != PCI_EXP_TYPE_UPSTREAM)
+ return NULL;
+
+ if (is_dsm(uport))
+ return uport;
+ return NULL;
+}
+
+/**
+ * pci_tsm_tdi_constructor() - base 'struct pci_tdi' initialization for link TSMs
+ * @pdev: PCI device function representing the TDI
+ * @tdi: context to initialize
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ */
+void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi,
+ struct kvm *kvm, u32 tdi_id)
+{
+ tdi->pdev = pdev;
+ tdi->kvm = kvm;
+ tdi->tdi_id = tdi_id;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_tdi_constructor);
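+
+/*
+ * Example: a minimal sketch of a TSM driver's bind() op embedding 'struct
+ * pci_tdi' in a driver-private container and running the constructor above.
+ * The container type and bind helper name are hypothetical.
+ */
+struct example_tdi {
+	struct pci_tdi base;
+	/* ...TSM-private TDI state... */
+};
+
+static struct pci_tdi *example_bind(struct pci_dev *pdev, struct kvm *kvm,
+				    u32 tdi_id)
+{
+	struct example_tdi *tdi = kzalloc(sizeof(*tdi), GFP_KERNEL);
+
+	if (!tdi)
+		return ERR_PTR(-ENOMEM);
+	pci_tsm_tdi_constructor(pdev, &tdi->base, kvm, tdi_id);
+	return &tdi->base;
+}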
+
+/**
+ * pci_tsm_link_constructor() - base 'struct pci_tsm' initialization for link TSMs
+ * @pdev: The PCI device
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ if (!is_link_tsm(tsm_dev))
+ return -EINVAL;
+
+ tsm->dsm_dev = find_dsm_dev(pdev);
+ if (!tsm->dsm_dev) {
+ pci_warn(pdev, "failed to find Device Security Manager\n");
+ return -ENXIO;
+ }
+ tsm->pdev = pdev;
+ tsm->tsm_dev = tsm_dev;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_link_constructor);
+
+/**
+ * pci_tsm_pf0_constructor() - common 'struct pci_tsm_pf0' (DSM) initialization
+ * @pdev: Physical Function 0 PCI device (as indicated by is_pci_tsm_pf0())
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ mutex_init(&tsm->lock);
+ tsm->doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_CMA);
+ if (!tsm->doe_mb) {
+ pci_warn(pdev, "TSM init failure, no CMA mailbox\n");
+ return -ENODEV;
+ }
+
+ return pci_tsm_link_constructor(pdev, &tsm->base_tsm, tsm_dev);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_constructor);
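+
+/*
+ * Example: a minimal sketch of a link TSM driver's connect() flow using the
+ * constructor above. The allocation scheme and the SPDM session setup step
+ * are hypothetical placeholders.
+ */
+static struct pci_tsm *example_connect(struct pci_dev *pdev,
+				       struct tsm_dev *tsm_dev)
+{
+	struct pci_tsm_pf0 *tsm_pf0;
+	int rc;
+
+	tsm_pf0 = kzalloc(sizeof(*tsm_pf0), GFP_KERNEL);
+	if (!tsm_pf0)
+		return ERR_PTR(-ENOMEM);
+
+	rc = pci_tsm_pf0_constructor(pdev, tsm_pf0, tsm_dev);
+	if (rc) {
+		kfree(tsm_pf0);
+		return ERR_PTR(rc);
+	}
+
+	/* ...establish the SPDM session via tsm_pf0->doe_mb... */
+
+	return &tsm_pf0->base_tsm;
+}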
+
+void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *pf0_tsm)
+{
+ mutex_destroy(&pf0_tsm->lock);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_destructor);
+
+int pci_tsm_register(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ if (!tsm_dev)
+ return -EINVAL;
+
+ /* The TSM device must only implement one of link_ops or devsec_ops */
+ if (!is_link_tsm(tsm_dev) && !is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ if (is_link_tsm(tsm_dev) && is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+
+ /* On first enable, update sysfs groups */
+ if (is_link_tsm(tsm_dev) && pci_tsm_link_count++ == 0) {
+ for_each_pci_dev(pdev)
+ if (is_pci_tsm_pf0(pdev))
+ link_sysfs_enable(pdev);
+ } else if (is_devsec_tsm(tsm_dev)) {
+ pci_tsm_devsec_count++;
+ }
+
+ return 0;
+}
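+
+/*
+ * Example: a platform TSM driver is expected to populate exactly one of the
+ * two ops tables before registering, e.g. (hypothetical names):
+ *
+ *	tsm_dev->pci_ops = &example_link_pci_ops;	(link_ops.probe set)
+ *	rc = pci_tsm_register(tsm_dev);
+ */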
+
+static void pci_tsm_fn_exit(struct pci_dev *pdev)
+{
+ __pci_tsm_unbind(pdev, NULL);
+ tsm_remove(pdev->tsm);
+}
+
+/**
+ * __pci_tsm_destroy() - destroy the TSM context for @pdev
+ * @pdev: device to cleanup
+ * @tsm_dev: the TSM device being removed, or NULL if @pdev is being removed.
+ *
+ * At device removal or TSM unregistration all established context
+ * with the TSM is torn down. Additionally, if there are no more TSMs
+ * registered, the PCI tsm/ sysfs attributes are hidden.
+ */
+static void __pci_tsm_destroy(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ struct pci_tsm *tsm = pdev->tsm;
+
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ /*
+	 * First, handle the TSM removal case to shut down @pdev sysfs. This
+	 * is skipped if the device itself is being removed since sysfs goes
+	 * away naturally at that point.
+ */
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev) && !pci_tsm_link_count)
+ link_sysfs_disable(pdev);
+
+ /* Nothing else to do if this device never attached to the departing TSM */
+ if (!tsm)
+ return;
+
+ /* Now lookup the tsm_dev to destroy TSM context */
+ if (!tsm_dev)
+ tsm_dev = tsm->tsm_dev;
+ else if (tsm_dev != tsm->tsm_dev)
+ return;
+
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev))
+ pci_tsm_disconnect(pdev);
+ else
+ pci_tsm_fn_exit(pdev);
+}
+
+void pci_tsm_destroy(struct pci_dev *pdev)
+{
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ __pci_tsm_destroy(pdev, NULL);
+}
+
+void pci_tsm_init(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ /*
+	 * Subfunctions are either probed synchronously with connect(), or
+	 * later when the SR-IOV configuration is changed, or, in the unlikely
+	 * case that connect() raced initial bus scanning.
+ */
+ if (pdev->tsm)
+ return;
+
+ if (pci_tsm_link_count) {
+ struct pci_dev *dsm = find_dsm_dev(pdev);
+
+ if (!dsm)
+ return;
+
+ /*
+ * The only path to init a Device Security Manager capable
+ * device is via connect().
+ */
+ if (!dsm->tsm)
+ return;
+
+ probe_fn(pdev, dsm);
+ }
+}
+
+void pci_tsm_unregister(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ if (is_link_tsm(tsm_dev))
+ pci_tsm_link_count--;
+ if (is_devsec_tsm(tsm_dev))
+ pci_tsm_devsec_count--;
+ for_each_pci_dev_reverse(pdev)
+ __pci_tsm_destroy(pdev, tsm_dev);
+}
+
+int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req,
+ size_t req_sz, void *resp, size_t resp_sz)
+{
+ struct pci_tsm_pf0 *tsm;
+
+ if (!pdev->tsm || !is_pci_tsm_pf0(pdev))
+ return -ENXIO;
+
+ tsm = to_pci_tsm_pf0(pdev->tsm);
+ if (!tsm->doe_mb)
+ return -ENXIO;
+
+ return pci_doe(tsm->doe_mb, PCI_VENDOR_ID_PCI_SIG, type, req, req_sz,
+ resp, resp_sz);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_doe_transfer);
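+
+/*
+ * Example: a minimal sketch of an SPDM GET_VERSION exchange over the CMA DOE
+ * mailbox via the helper above. The raw message bytes and buffer sizes are
+ * illustrative assumptions, not part of this interface.
+ */
+static int example_spdm_get_version(struct pci_dev *pdev)
+{
+	u8 req[4] = { 0x10, 0x84, 0, 0 }; /* SPDMVersion 1.0, GET_VERSION */
+	u8 resp[64];
+
+	return pci_tsm_doe_transfer(pdev, PCI_DOE_FEATURE_CMA, req,
+				    sizeof(req), resp, sizeof(resp));
+}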
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
index cdad01d68a37..8d71dc53cc1d 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -81,10 +81,6 @@
#define MVOLT_1800 0
#define MVOLT_3300 1
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
static const char * const gpio_group_name[] = {
"gpioa", "gpiob", "gpioc", "gpiod", "gpioe", "gpiof", "gpiog",
"gpioh", "gpioi", "gpioj", "gpiok", "gpiol", "gpiom", "gpion",
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index 585fe1661e17..aba129ead04c 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -100,7 +100,6 @@ struct zynqmp_pctrl_group {
static struct pinctrl_desc zynqmp_desc;
static u32 family_code;
-static u32 sub_family_code;
static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
{
@@ -605,7 +604,7 @@ static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
return -ENOMEM;
for (pin = 0; pin < groups[resp[i]].npins; pin++) {
- if (family_code == ZYNQMP_FAMILY_CODE)
+ if (family_code == PM_ZYNQMP_FAMILY_CODE)
__set_bit(groups[resp[i]].pins[pin], used_pins);
else
__set_bit((u8)groups[resp[i]].pins[pin] - 1, used_pins);
@@ -958,11 +957,11 @@ static int zynqmp_pinctrl_probe(struct platform_device *pdev)
if (!pctrl)
return -ENOMEM;
- ret = zynqmp_pm_get_family_info(&family_code, &sub_family_code);
+ ret = zynqmp_pm_get_family_info(&family_code);
if (ret < 0)
return ret;
- if (family_code == ZYNQMP_FAMILY_CODE) {
+ if (family_code == PM_ZYNQMP_FAMILY_CODE) {
ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev, &zynqmp_desc.pins,
&zynqmp_desc.npins);
} else {
diff --git a/drivers/power/reset/sc27xx-poweroff.c b/drivers/power/reset/sc27xx-poweroff.c
index 90287c31992c..393bd1c33b73 100644
--- a/drivers/power/reset/sc27xx-poweroff.c
+++ b/drivers/power/reset/sc27xx-poweroff.c
@@ -28,7 +28,7 @@ static struct regmap *regmap;
 * taking cpus down to avoid racing the regmap or spi mutex lock when
 * powering off the system through the PMIC.
*/
-static void sc27xx_poweroff_shutdown(void)
+static void sc27xx_poweroff_shutdown(void *data)
{
#ifdef CONFIG_HOTPLUG_CPU
int cpu;
@@ -40,10 +40,14 @@ static void sc27xx_poweroff_shutdown(void)
#endif
}
-static struct syscore_ops poweroff_syscore_ops = {
+static const struct syscore_ops poweroff_syscore_ops = {
.shutdown = sc27xx_poweroff_shutdown,
};
+static struct syscore poweroff_syscore = {
+ .ops = &poweroff_syscore_ops,
+};
+
static void sc27xx_poweroff_do_poweroff(void)
{
/* Disable the external subsys connection's power firstly */
@@ -62,7 +66,7 @@ static int sc27xx_poweroff_probe(struct platform_device *pdev)
return -ENODEV;
pm_power_off = sc27xx_poweroff_do_poweroff;
- register_syscore_ops(&poweroff_syscore_ops);
+ register_syscore(&poweroff_syscore);
return 0;
}
diff --git a/drivers/pwm/pwm_th1520.rs b/drivers/pwm/pwm_th1520.rs
index 955c359b07fb..e3b7e77356fc 100644
--- a/drivers/pwm/pwm_th1520.rs
+++ b/drivers/pwm/pwm_th1520.rs
@@ -337,7 +337,7 @@ impl platform::Driver for Th1520PwmPlatformDriver {
fn probe(
pdev: &platform::Device<Core>,
_id_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
let dev = pdev.as_ref();
let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;
@@ -374,7 +374,7 @@ impl platform::Driver for Th1520PwmPlatformDriver {
pwm::Registration::register(dev, chip)?;
- Ok(KBox::new(Th1520PwmPlatformDriver, GFP_KERNEL)?.into())
+ Ok(Th1520PwmPlatformDriver)
}
}
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 6e78a01755c7..5130a35214c9 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -261,56 +261,6 @@ static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
return 0;
}
-/* Specific configuration for i.MX8MP */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
- .att = imx_dsp_rproc_att_imx8mp,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
- .method = IMX_RPROC_RESET_CONTROLLER,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
- .dcfg = &dsp_rproc_cfg_imx8mp,
- .reset = imx8mp_dsp_reset,
-};
-
-/* Specific configuration for i.MX8ULP */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
- .src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
- .src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
- .src_start = 0,
- .src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
- .att = imx_dsp_rproc_att_imx8ulp,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
- .method = IMX_RPROC_MMIO,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
- .dcfg = &dsp_rproc_cfg_imx8ulp,
- .reset = imx8ulp_dsp_reset,
-};
-
-/* Specific configuration for i.MX8QXP */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
- .att = imx_dsp_rproc_att_imx8qxp,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
- .method = IMX_RPROC_SCU_API,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
- .dcfg = &dsp_rproc_cfg_imx8qxp,
-};
-
-/* Specific configuration for i.MX8QM */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
- .att = imx_dsp_rproc_att_imx8qm,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
- .method = IMX_RPROC_SCU_API,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
- .dcfg = &dsp_rproc_cfg_imx8qm,
-};
-
static int imx_dsp_rproc_ready(struct rproc *rproc)
{
struct imx_dsp_rproc *priv = rproc->priv;
@@ -388,6 +338,28 @@ static int imx_dsp_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type,
return RSC_HANDLED;
}
+static int imx_dsp_rproc_mmio_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dsp_dcfg->dcfg;
+
+ return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_start);
+}
+
+static int imx_dsp_rproc_reset_ctrl_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return reset_control_deassert(priv->run_stall);
+}
+
+static int imx_dsp_rproc_scu_api_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return imx_sc_pm_cpu_start(priv->ipc_handle, IMX_SC_R_DSP, true, rproc->bootaddr);
+}
+
/*
* Start function for rproc_ops
*
@@ -404,32 +376,41 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
struct device *dev = rproc->dev.parent;
int ret;
- switch (dcfg->method) {
- case IMX_RPROC_MMIO:
- ret = regmap_update_bits(priv->regmap,
- dcfg->src_reg,
- dcfg->src_mask,
- dcfg->src_start);
- break;
- case IMX_RPROC_SCU_API:
- ret = imx_sc_pm_cpu_start(priv->ipc_handle,
- IMX_SC_R_DSP,
- true,
- rproc->bootaddr);
- break;
- case IMX_RPROC_RESET_CONTROLLER:
- ret = reset_control_deassert(priv->run_stall);
- break;
- default:
+ if (!dcfg->ops || !dcfg->ops->start)
return -EOPNOTSUPP;
- }
- if (ret)
+ ret = dcfg->ops->start(rproc);
+ if (ret) {
dev_err(dev, "Failed to enable remote core!\n");
- else if (priv->flags & WAIT_FW_READY)
+ return ret;
+ }
+
+ if (priv->flags & WAIT_FW_READY)
return imx_dsp_rproc_ready(rproc);
- return ret;
+ return 0;
+}
+
+static int imx_dsp_rproc_mmio_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dsp_dcfg->dcfg;
+
+ return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_stop);
+}
+
+static int imx_dsp_rproc_reset_ctrl_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return reset_control_assert(priv->run_stall);
+}
+
+static int imx_dsp_rproc_scu_api_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return imx_sc_pm_cpu_start(priv->ipc_handle, IMX_SC_R_DSP, false, rproc->bootaddr);
}
/*
@@ -449,30 +430,18 @@ static int imx_dsp_rproc_stop(struct rproc *rproc)
return 0;
}
- switch (dcfg->method) {
- case IMX_RPROC_MMIO:
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
- dcfg->src_stop);
- break;
- case IMX_RPROC_SCU_API:
- ret = imx_sc_pm_cpu_start(priv->ipc_handle,
- IMX_SC_R_DSP,
- false,
- rproc->bootaddr);
- break;
- case IMX_RPROC_RESET_CONTROLLER:
- ret = reset_control_assert(priv->run_stall);
- break;
- default:
+ if (!dcfg->ops || !dcfg->ops->stop)
return -EOPNOTSUPP;
- }
- if (ret)
+ ret = dcfg->ops->stop(rproc);
+ if (ret) {
dev_err(dev, "Failed to stop remote core\n");
- else
- priv->flags &= ~REMOTE_IS_READY;
+ return ret;
+ }
- return ret;
+ priv->flags &= ~REMOTE_IS_READY;
+
+ return 0;
}
/**
@@ -689,11 +658,9 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
struct rproc *rproc = priv->rproc;
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
void __iomem *cpu_addr;
- int a;
+ int a, i = 0;
u64 da;
/* Remap required addresses */
@@ -724,49 +691,40 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
rproc_add_carveout(rproc, mem);
}
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
+ while (1) {
+ int err;
+ struct resource res;
+
+ err = of_reserved_mem_region_to_resource(np, i++, &res);
+ if (err)
+ return 0;
+
/*
 * Ignore the first memory region which will be used as the vdev buffer.
 * No need to do extra handling, rproc_add_virtio_dev will handle it.
*/
- if (!strcmp(it.node->name, "vdev0buffer"))
+ if (strstarts(res.name, "vdev0buffer"))
continue;
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(dev, "unable to acquire memory-region\n");
+ if (imx_dsp_rproc_sys_to_da(priv, res.start, resource_size(&res), &da))
return -EINVAL;
- }
- if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
- of_node_put(it.node);
- return -EINVAL;
- }
-
- cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!cpu_addr) {
- of_node_put(it.node);
- dev_err(dev, "failed to map memory %p\n", &rmem->base);
- return -ENOMEM;
+ cpu_addr = devm_ioremap_resource_wc(dev, &res);
+ if (IS_ERR(cpu_addr)) {
+ dev_err(dev, "failed to map memory %pR\n", &res);
+ return PTR_ERR(cpu_addr);
}
/* Register memory region */
- mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
- rmem->size, da, NULL, NULL, it.node->name);
-
- if (mem) {
- rproc_coredump_add_segment(rproc, da, rmem->size);
- } else {
- of_node_put(it.node);
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)res.start,
+ resource_size(&res), da, NULL, NULL,
+ "%.*s", strchrnul(res.name, '@') - res.name, res.name);
+ if (!mem)
return -ENOMEM;
- }
+ rproc_coredump_add_segment(rproc, da, resource_size(&res));
rproc_add_carveout(rproc, mem);
}
-
- return 0;
}
/* Prepare function for rproc_ops */
@@ -784,7 +742,7 @@ static int imx_dsp_rproc_prepare(struct rproc *rproc)
pm_runtime_get_sync(dev);
- return 0;
+ return 0;
}
/* Unprepare function for rproc_ops */
@@ -792,7 +750,7 @@ static int imx_dsp_rproc_unprepare(struct rproc *rproc)
{
pm_runtime_put_sync(rproc->dev.parent);
- return 0;
+ return 0;
}
/* Kick function for rproc_ops */
@@ -1062,14 +1020,50 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
{
struct device *dev = priv->rproc->dev.parent;
- int ret;
/* A single PM domain is already attached. */
if (dev->pm_domain)
return 0;
- ret = dev_pm_domain_attach_list(dev, NULL, &priv->pd_list);
- return ret < 0 ? ret : 0;
+ return devm_pm_domain_attach_list(dev, NULL, &priv->pd_list);
+}
+
+static int imx_dsp_rproc_mmio_detect_mode(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv->regmap = regmap;
+
+ return 0;
+}
+
+static int imx_dsp_rproc_reset_ctrl_detect_mode(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+
+ priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
+ if (IS_ERR(priv->run_stall)) {
+ dev_err(dev, "Failed to get DSP runstall reset control\n");
+ return PTR_ERR(priv->run_stall);
+ }
+
+ return 0;
+}
+
+static int imx_dsp_rproc_scu_api_detect_mode(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return imx_scu_get_handle(&priv->ipc_handle);
}
/**
@@ -1087,38 +1081,12 @@ static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
{
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
- struct device *dev = priv->rproc->dev.parent;
- struct regmap *regmap;
- int ret = 0;
-
- switch (dsp_dcfg->dcfg->method) {
- case IMX_RPROC_SCU_API:
- ret = imx_scu_get_handle(&priv->ipc_handle);
- if (ret)
- return ret;
- break;
- case IMX_RPROC_MMIO:
- regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
- if (IS_ERR(regmap)) {
- dev_err(dev, "failed to find syscon\n");
- return PTR_ERR(regmap);
- }
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
- priv->regmap = regmap;
- break;
- case IMX_RPROC_RESET_CONTROLLER:
- priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
- if (IS_ERR(priv->run_stall)) {
- dev_err(dev, "Failed to get DSP runstall reset control\n");
- return PTR_ERR(priv->run_stall);
- }
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
+ if (dcfg->ops && dcfg->ops->detect_mode)
+ return dcfg->ops->detect_mode(priv->rproc);
- return ret;
+ return -EOPNOTSUPP;
}
static const char *imx_dsp_clks_names[DSP_RPROC_CLK_MAX] = {
@@ -1152,11 +1120,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
return -ENODEV;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
- if (ret) {
- dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
rproc = devm_rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops,
fw_name, sizeof(*priv));
@@ -1179,52 +1144,28 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
INIT_WORK(&priv->rproc_work, imx_dsp_rproc_vq_work);
ret = imx_dsp_rproc_detect_mode(priv);
- if (ret) {
- dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed on imx_dsp_rproc_detect_mode\n");
/* There are multiple power domains required by DSP on some platform */
ret = imx_dsp_attach_pm_domains(priv);
- if (ret) {
- dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed on imx_dsp_attach_pm_domains\n");
+
/* Get clocks */
ret = imx_dsp_rproc_clk_get(priv);
- if (ret) {
- dev_err(dev, "failed on imx_dsp_rproc_clk_get\n");
- goto err_detach_domains;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed on imx_dsp_rproc_clk_get\n");
init_completion(&priv->pm_comp);
rproc->auto_boot = false;
- ret = rproc_add(rproc);
- if (ret) {
- dev_err(dev, "rproc_add failed\n");
- goto err_detach_domains;
- }
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "rproc_add failed\n");
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_XTENSA);
- pm_runtime_enable(dev);
-
- return 0;
-
-err_detach_domains:
- dev_pm_domain_detach_list(priv->pd_list);
-
- return ret;
-}
-
-static void imx_dsp_rproc_remove(struct platform_device *pdev)
-{
- struct rproc *rproc = platform_get_drvdata(pdev);
- struct imx_dsp_rproc *priv = rproc->priv;
-
- pm_runtime_disable(&pdev->dev);
- rproc_del(rproc);
- dev_pm_domain_detach_list(priv->pd_list);
+ return devm_pm_runtime_enable(dev);
}
/* pm runtime functions */
@@ -1364,6 +1305,74 @@ static const struct dev_pm_ops imx_dsp_rproc_pm_ops = {
RUNTIME_PM_OPS(imx_dsp_runtime_suspend, imx_dsp_runtime_resume, NULL)
};
+static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_mmio = {
+ .start = imx_dsp_rproc_mmio_start,
+ .stop = imx_dsp_rproc_mmio_stop,
+ .detect_mode = imx_dsp_rproc_mmio_detect_mode,
+};
+
+static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_reset_ctrl = {
+ .start = imx_dsp_rproc_reset_ctrl_start,
+ .stop = imx_dsp_rproc_reset_ctrl_stop,
+ .detect_mode = imx_dsp_rproc_reset_ctrl_detect_mode,
+};
+
+static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_scu_api = {
+ .start = imx_dsp_rproc_scu_api_start,
+ .stop = imx_dsp_rproc_scu_api_stop,
+ .detect_mode = imx_dsp_rproc_scu_api_detect_mode,
+};
+
+/* Specific configuration for i.MX8MP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
+ .att = imx_dsp_rproc_att_imx8mp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
+ .ops = &imx_dsp_rproc_ops_reset_ctrl,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
+ .dcfg = &dsp_rproc_cfg_imx8mp,
+ .reset = imx8mp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8ULP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
+ .src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ .src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .src_start = 0,
+ .src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .att = imx_dsp_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
+ .ops = &imx_dsp_rproc_ops_mmio,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
+ .dcfg = &dsp_rproc_cfg_imx8ulp,
+ .reset = imx8ulp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8QXP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
+ .att = imx_dsp_rproc_att_imx8qxp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
+ .ops = &imx_dsp_rproc_ops_scu_api,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
+ .dcfg = &dsp_rproc_cfg_imx8qxp,
+};
+
+/* Specific configuration for i.MX8QM */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
+ .att = imx_dsp_rproc_att_imx8qm,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
+ .ops = &imx_dsp_rproc_ops_scu_api,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
+ .dcfg = &dsp_rproc_cfg_imx8qm,
+};
+
static const struct of_device_id imx_dsp_rproc_of_match[] = {
{ .compatible = "fsl,imx8qxp-hifi4", .data = &imx_dsp_rproc_cfg_imx8qxp },
{ .compatible = "fsl,imx8qm-hifi4", .data = &imx_dsp_rproc_cfg_imx8qm },
@@ -1375,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);
static struct platform_driver imx_dsp_rproc_driver = {
.probe = imx_dsp_rproc_probe,
- .remove = imx_dsp_rproc_remove,
.driver = {
.name = "imx-dsp-rproc",
.of_match_table = imx_dsp_rproc_of_match,
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index bb25221a4a89..3be8790c14a2 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -93,7 +93,7 @@ struct imx_rproc_mem {
#define ATT_CORE(I) BIT((I))
static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block);
-static void imx_rproc_free_mbox(struct rproc *rproc);
+static void imx_rproc_free_mbox(void *data);
struct imx_rproc {
struct device *dev;
@@ -490,50 +490,44 @@ static int imx_rproc_prepare(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
struct device_node *np = priv->dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
+ int i = 0;
u32 da;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
+ while (1) {
+ int err;
+ struct resource res;
+
+ err = of_reserved_mem_region_to_resource(np, i++, &res);
+ if (err)
+ return 0;
+
/*
 * Ignore the first memory region which will be used as the vdev buffer.
 * No need to do extra handling, rproc_add_virtio_dev will handle it.
*/
- if (!strcmp(it.node->name, "vdev0buffer"))
+ if (strstarts(res.name, "vdev0buffer"))
continue;
- if (!strcmp(it.node->name, "rsc-table"))
+ if (strstarts(res.name, "rsc-table"))
continue;
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(priv->dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
-
 /* No need to translate pa to da, i.MX uses the same map */
- da = rmem->base;
+ da = res.start;
/* Register memory region */
- mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
+ mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)res.start,
+ resource_size(&res), da,
imx_rproc_mem_alloc, imx_rproc_mem_release,
- it.node->name);
-
- if (mem) {
- rproc_coredump_add_segment(rproc, da, rmem->size);
- } else {
- of_node_put(it.node);
+ "%.*s", strchrnul(res.name, '@') - res.name,
+ res.name);
+ if (!mem)
return -ENOMEM;
- }
+ rproc_coredump_add_segment(rproc, da, resource_size(&res));
rproc_add_carveout(rproc, mem);
}
-
- return 0;
}
static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
@@ -575,13 +569,9 @@ static int imx_rproc_attach(struct rproc *rproc)
return imx_rproc_xtr_mbox_init(rproc, true);
}
-static int imx_rproc_detach(struct rproc *rproc)
+static int imx_rproc_scu_api_detach(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
-
- if (dcfg->method != IMX_RPROC_SCU_API)
- return -EOPNOTSUPP;
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id))
return -EOPNOTSUPP;
@@ -591,6 +581,17 @@ static int imx_rproc_detach(struct rproc *rproc)
return 0;
}
+static int imx_rproc_detach(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+
+ if (!dcfg->ops || !dcfg->ops->detach)
+ return -EOPNOTSUPP;
+
+ return dcfg->ops->detach(rproc);
+}
+
static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
{
struct imx_rproc *priv = rproc->priv;
@@ -664,47 +665,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
}
/* memory-region is optional property */
- nph = of_count_phandle_with_args(np, "memory-region", NULL);
+ nph = of_reserved_mem_region_count(np);
if (nph <= 0)
return 0;
/* remap optional addresses */
for (a = 0; a < nph; a++) {
- struct device_node *node;
struct resource res;
- node = of_parse_phandle(np, "memory-region", a);
- if (!node)
- continue;
- /* Not map vdevbuffer, vdevring region */
- if (!strncmp(node->name, "vdev", strlen("vdev"))) {
- of_node_put(node);
- continue;
- }
- err = of_address_to_resource(node, 0, &res);
+ err = of_reserved_mem_region_to_resource(np, a, &res);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
- of_node_put(node);
return err;
}
- if (b >= IMX_RPROC_MEM_MAX) {
- of_node_put(node);
+ /* Do not map the vdevbuffer and vdevring regions */
+ if (strstarts(res.name, "vdev"))
+ continue;
+
+ if (b >= IMX_RPROC_MEM_MAX)
break;
- }
 /* Do not use the resource version, because we might share the region */
 priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
if (!priv->mem[b].cpu_addr) {
dev_err(dev, "failed to remap %pr\n", &res);
- of_node_put(node);
return -ENOMEM;
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
- if (!strcmp(node->name, "rsc-table"))
+ if (!strcmp(res.name, "rsc-table"))
priv->rsc_table = priv->mem[b].cpu_addr;
- of_node_put(node);
b++;
}
@@ -780,8 +771,9 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block)
return 0;
}
-static void imx_rproc_free_mbox(struct rproc *rproc)
+static void imx_rproc_free_mbox(void *data)
{
+ struct rproc *rproc = data;
struct imx_rproc *priv = rproc->priv;
if (priv->tx_ch) {
@@ -795,13 +787,9 @@ static void imx_rproc_free_mbox(struct rproc *rproc)
}
}
-static void imx_rproc_put_scu(struct rproc *rproc)
+static void imx_rproc_put_scu(void *data)
{
- struct imx_rproc *priv = rproc->priv;
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
-
- if (dcfg->method != IMX_RPROC_SCU_API)
- return;
+ struct imx_rproc *priv = data;
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
dev_pm_domain_detach_list(priv->pd_list);
@@ -943,6 +931,10 @@ static int imx_rproc_scu_api_detect_mode(struct rproc *rproc)
else
priv->core_index = 0;
+ ret = devm_add_action_or_reset(dev, imx_rproc_put_scu, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add action for put scu\n");
+
/*
 * If the Mcore resource is not owned by the Acore partition, it is kicked
 * by ROM, and Linux can only do IPC with the Mcore and nothing else.
@@ -1001,35 +993,6 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
return dcfg->ops->detect_mode(priv->rproc);
}
-static int imx_rproc_clk_enable(struct imx_rproc *priv)
-{
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
- struct device *dev = priv->dev;
- int ret;
-
- /* Remote core is not under control of Linux or it is managed by SCU API */
- if (dcfg->method == IMX_RPROC_NONE || dcfg->method == IMX_RPROC_SCU_API)
- return 0;
-
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
-
- /*
- * clk for M4 block including memory. Should be
- * enabled before .start for FW transfer.
- */
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- return ret;
- }
-
- return 0;
-}
-
static int imx_rproc_sys_off_handler(struct sys_off_data *data)
{
struct rproc *rproc = data->cb_data;
@@ -1046,6 +1009,13 @@ static int imx_rproc_sys_off_handler(struct sys_off_data *data)
return NOTIFY_DONE;
}
+static void imx_rproc_destroy_workqueue(void *data)
+{
+ struct workqueue_struct *workqueue = data;
+
+ destroy_workqueue(workqueue);
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1077,25 +1047,38 @@ static int imx_rproc_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ ret = devm_add_action_or_reset(dev, imx_rproc_destroy_workqueue, priv->workqueue);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devm destroy workqueue action\n");
+
INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
- goto err_put_wkq;
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, imx_rproc_free_mbox, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devm free mbox action\n");
ret = imx_rproc_addr_init(priv, pdev);
- if (ret) {
- dev_err(dev, "failed on imx_rproc_addr_init\n");
- goto err_put_mbox;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed on imx_rproc_addr_init\n");
ret = imx_rproc_detect_mode(priv);
if (ret)
- goto err_put_mbox;
+ return dev_err_probe(dev, ret, "failed on detect mode\n");
- ret = imx_rproc_clk_enable(priv);
- if (ret)
- goto err_put_scu;
+ /*
+ * Handle clocks when remote core is under control of Linux AND the
+ * clocks are not managed by system firmware.
+ */
+ if (dcfg->flags & IMX_RPROC_NEED_CLKS) {
+ priv->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to enable clock\n");
+ }
if (rproc->state != RPROC_DETACHED)
rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
@@ -1110,45 +1093,32 @@ static int imx_rproc_probe(struct platform_device *pdev)
ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
SYS_OFF_PRIO_DEFAULT,
imx_rproc_sys_off_handler, rproc);
- if (ret) {
- dev_err(dev, "register power off handler failure\n");
- goto err_put_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "register power off handler failure\n");
ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART_PREPARE,
SYS_OFF_PRIO_DEFAULT,
imx_rproc_sys_off_handler, rproc);
- if (ret) {
- dev_err(dev, "register restart handler failure\n");
- goto err_put_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "register restart handler failure\n");
}
- if (dcfg->method == IMX_RPROC_SCU_API) {
- pm_runtime_enable(dev);
- ret = pm_runtime_resume_and_get(dev);
- if (ret) {
- dev_err(dev, "pm_runtime get failed: %d\n", ret);
- goto err_put_clk;
- }
- }
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "pm_runtime get failed\n");
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
- goto err_put_clk;
+ goto err_put_pm;
}
return 0;
-err_put_clk:
- clk_disable_unprepare(priv->clk);
-err_put_scu:
- imx_rproc_put_scu(rproc);
-err_put_mbox:
- imx_rproc_free_mbox(rproc);
-err_put_wkq:
- destroy_workqueue(priv->workqueue);
+err_put_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -1158,15 +1128,8 @@ static void imx_rproc_remove(struct platform_device *pdev)
struct rproc *rproc = platform_get_drvdata(pdev);
struct imx_rproc *priv = rproc->priv;
- if (priv->dcfg->method == IMX_RPROC_SCU_API) {
- pm_runtime_disable(priv->dev);
- pm_runtime_put(priv->dev);
- }
- clk_disable_unprepare(priv->clk);
- rproc_del(rproc);
- imx_rproc_put_scu(rproc);
- imx_rproc_free_mbox(rproc);
- destroy_workqueue(priv->workqueue);
+ pm_runtime_disable(priv->dev);
+ pm_runtime_put_noidle(priv->dev);
}
static const struct imx_rproc_plat_ops imx_rproc_ops_arm_smc = {
@@ -1184,6 +1147,7 @@ static const struct imx_rproc_plat_ops imx_rproc_ops_mmio = {
static const struct imx_rproc_plat_ops imx_rproc_ops_scu_api = {
.start = imx_rproc_scu_api_start,
.stop = imx_rproc_scu_api_stop,
+ .detach = imx_rproc_scu_api_detach,
.detect_mode = imx_rproc_scu_api_detect_mode,
};
@@ -1196,15 +1160,15 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
.gpr_wait = IMX8M_GPR22_CM7_CPUWAIT,
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
- .method = IMX_RPROC_SMC,
.ops = &imx_rproc_ops_arm_smc,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
@@ -1214,34 +1178,30 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx8mq,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = {
.att = imx_rproc_att_imx8qm,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8qm),
- .method = IMX_RPROC_SCU_API,
.ops = &imx_rproc_ops_scu_api,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = {
.att = imx_rproc_att_imx8qxp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8qxp),
- .method = IMX_RPROC_SCU_API,
.ops = &imx_rproc_ops_scu_api,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
.att = imx_rproc_att_imx8ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
- .method = IMX_RPROC_NONE,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
.att = imx_rproc_att_imx7ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
- .method = IMX_RPROC_NONE,
.flags = IMX_RPROC_NEED_SYSTEM_OFF,
};
@@ -1252,8 +1212,8 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx7d,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
@@ -1263,15 +1223,15 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
.src_stop = IMX6SX_M4_STOP,
.att = imx_rproc_att_imx6sx,
.att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
.att = imx_rproc_att_imx93,
.att_size = ARRAY_SIZE(imx_rproc_att_imx93),
- .method = IMX_RPROC_SMC,
.ops = &imx_rproc_ops_arm_smc,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct of_device_id imx_rproc_of_match[] = {
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 3a9adaaf048b..1b2d9f4d6d19 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -15,25 +15,14 @@ struct imx_rproc_att {
int flags;
};
-/* Remote core start/stop method */
-enum imx_rproc_method {
- IMX_RPROC_NONE,
- /* Through syscon regmap */
- IMX_RPROC_MMIO,
- /* Through ARM SMCCC */
- IMX_RPROC_SMC,
- /* Through System Control Unit API */
- IMX_RPROC_SCU_API,
- /* Through Reset Controller API */
- IMX_RPROC_RESET_CONTROLLER,
-};
-
/* dcfg flags */
#define IMX_RPROC_NEED_SYSTEM_OFF BIT(0)
+#define IMX_RPROC_NEED_CLKS BIT(1)
struct imx_rproc_plat_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
+ int (*detach)(struct rproc *rproc);
int (*detect_mode)(struct rproc *rproc);
};
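+
+/*
+ * Example: with the method enum gone, a hypothetical new platform supplies
+ * its control plumbing as an ops table instead, e.g.:
+ *
+ *	static const struct imx_rproc_plat_ops imx_rproc_ops_foo = {
+ *		.start		= imx_rproc_foo_start,
+ *		.stop		= imx_rproc_foo_stop,
+ *		.detect_mode	= imx_rproc_foo_detect_mode,
+ *	};
+ */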
@@ -46,7 +35,6 @@ struct imx_rproc_dcfg {
u32 gpr_wait;
const struct imx_rproc_att *att;
size_t att_size;
- enum imx_rproc_method method;
u32 flags;
const struct imx_rproc_plat_ops *ops;
};
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 8206a1766481..db8fd045468d 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -16,6 +16,7 @@
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>
+#include <linux/string.h>
#include "mtk_common.h"
#include "remoteproc_internal.h"
@@ -1093,22 +1094,74 @@ static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
}
}
+/**
+ * scp_get_default_fw_path() - Get default SCP firmware path
+ * @dev: SCP Device
+ * @core_id: SCP Core number
+ *
+ * This function generates a path based on the following format:
+ * mediatek/(soc_model)/scp_cX.img for multi-core, or
+ * mediatek/(soc_model)/scp.img for single-core SCP hardware
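+ * (e.g. a hypothetical MT8195 multi-core SCP would yield
+ * "mediatek/mt8195/scp_c0.img" for core 0)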
+ *
+ * Return: A devm allocated string containing the full path to
+ * a SCP firmware or an error pointer
+ */
+static const char *scp_get_default_fw_path(struct device *dev, int core_id)
+{
+ struct device_node *np = core_id < 0 ? dev->of_node : dev->parent->of_node;
+ const char *compatible, *soc;
+ char scp_fw_file[7];
+ int ret;
+
+ /* Use only the first compatible string */
+ ret = of_property_read_string_index(np, "compatible", 0, &compatible);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* If the compatible string's length is implausible, bail out early */
+ if (strlen(compatible) < strlen("mediatek,mtXXXX-scp"))
+ return ERR_PTR(-EINVAL);
+
+ /* If the compatible string starts with "mediatek,mt" assume that it's ok */
+ if (!str_has_prefix(compatible, "mediatek,mt"))
+ return ERR_PTR(-EINVAL);
+
+ if (core_id >= 0)
+ ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp_c%d", core_id);
+ else
+ ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp");
+ if (ret >= sizeof(scp_fw_file))
+ return ERR_PTR(-ENAMETOOLONG);
+
+ /* Not using strchr here, as strlen of a constant gets optimized by the compiler */
+ soc = &compatible[strlen("mediatek,")];
+
+ return devm_kasprintf(dev, GFP_KERNEL, "mediatek/%.*s/%s.img",
+ (int)strlen("mtXXXX"), soc, scp_fw_file);
+}
+
static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
struct mtk_scp_of_cluster *scp_cluster,
- const struct mtk_scp_of_data *of_data)
+ const struct mtk_scp_of_data *of_data,
+ int core_id)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk_scp *scp;
struct rproc *rproc;
struct resource *res;
- const char *fw_name = "scp.img";
+ const char *fw_name;
int ret, i;
const struct mtk_scp_sizes_data *scp_sizes;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
- if (ret < 0 && ret != -EINVAL)
- return ERR_PTR(ret);
+ if (ret) {
+ fw_name = scp_get_default_fw_path(dev, core_id);
+ if (IS_ERR(fw_name)) {
+ dev_err(dev, "Cannot get firmware path: %ld\n", PTR_ERR(fw_name));
+ return ERR_CAST(fw_name);
+ }
+ }
rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
if (!rproc) {
@@ -1212,7 +1265,7 @@ static int scp_add_single_core(struct platform_device *pdev,
struct mtk_scp *scp;
int ret;
- scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
+ scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev), -1);
if (IS_ERR(scp))
return PTR_ERR(scp);
@@ -1259,7 +1312,7 @@ static int scp_add_multi_core(struct platform_device *pdev,
goto init_fail;
}
- scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
+ scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id], core_id);
put_device(&cpdev->dev);
if (IS_ERR(scp)) {
ret = PTR_ERR(scp);
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 9c9e9c3cf378..cb01354248af 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -555,7 +555,6 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -656,7 +655,6 @@ static int omap_rproc_start(struct rproc *rproc)
pm_runtime_use_autosuspend(dev);
pm_runtime_get_noresume(dev);
pm_runtime_enable(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -714,7 +712,6 @@ enable_device:
reset_control_deassert(oproc->reset);
out:
/* schedule the next auto-suspend */
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index e98b7e03162c..b5c8d6d38c9c 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -625,27 +625,22 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
- struct reserved_mem *rmem = NULL;
- struct device_node *node;
-
- node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
+ int ret;
+ struct resource res;
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(adsp->dev->of_node, 0, &res);
+ if (ret) {
dev_err(adsp->dev, "unable to resolve memory-region\n");
- return -EINVAL;
+ return ret;
}
- adsp->mem_phys = adsp->mem_reloc = rmem->base;
- adsp->mem_size = rmem->size;
- adsp->mem_region = devm_ioremap_wc(adsp->dev,
- adsp->mem_phys, adsp->mem_size);
- if (!adsp->mem_region) {
- dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
- &rmem->base, adsp->mem_size);
- return -EBUSY;
+ adsp->mem_phys = adsp->mem_reloc = res.start;
+ adsp->mem_size = resource_size(&res);
+ adsp->mem_region = devm_ioremap_resource_wc(adsp->dev, &res);
+ if (IS_ERR(adsp->mem_region)) {
+ dev_err(adsp->dev, "unable to map memory region: %pR\n", &res);
+ return PTR_ERR(adsp->mem_region);
}
return 0;
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 3087d895b87f..91940977ca89 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1970,8 +1970,8 @@ static int q6v5_init_reset(struct q6v5 *qproc)
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
struct device_node *child;
- struct reserved_mem *rmem;
- struct device_node *node;
+ struct resource res;
+ int ret;
/*
* In the absence of mba/mpss sub-child, extract the mba and mpss
@@ -1979,71 +1979,49 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
*/
child = of_get_child_by_name(qproc->dev->of_node, "mba");
if (!child) {
- node = of_parse_phandle(qproc->dev->of_node,
- "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 0, &res);
} else {
- node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(child, 0, &res);
of_node_put(child);
}
- if (!node) {
- dev_err(qproc->dev, "no mba memory-region specified\n");
- return -EINVAL;
- }
-
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ if (ret) {
dev_err(qproc->dev, "unable to resolve mba region\n");
- return -EINVAL;
+ return ret;
}
- qproc->mba_phys = rmem->base;
- qproc->mba_size = rmem->size;
+ qproc->mba_phys = res.start;
+ qproc->mba_size = resource_size(&res);
if (!child) {
- node = of_parse_phandle(qproc->dev->of_node,
- "memory-region", 1);
+ ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 1, &res);
} else {
child = of_get_child_by_name(qproc->dev->of_node, "mpss");
- node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(child, 0, &res);
of_node_put(child);
}
- if (!node) {
- dev_err(qproc->dev, "no mpss memory-region specified\n");
- return -EINVAL;
- }
-
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ if (ret) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
- return -EINVAL;
+ return ret;
}
- qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
- qproc->mpss_size = rmem->size;
+ qproc->mpss_phys = qproc->mpss_reloc = res.start;
+ qproc->mpss_size = resource_size(&res);
if (!child) {
- node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
+ ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 2, &res);
} else {
child = of_get_child_by_name(qproc->dev->of_node, "metadata");
- node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(child, 0, &res);
of_node_put(child);
}
- if (!node)
+ if (ret)
return 0;
- rmem = of_reserved_mem_lookup(node);
- if (!rmem) {
- dev_err(qproc->dev, "unable to resolve metadata region\n");
- return -EINVAL;
- }
-
- qproc->mdata_phys = rmem->base;
- qproc->mdata_size = rmem->size;
+ qproc->mdata_phys = res.start;
+ qproc->mdata_size = resource_size(&res);
return 0;
}
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 158bcd6cc85c..52680ac99589 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -547,54 +547,38 @@ static void qcom_pas_pds_detach(struct qcom_pas *pas, struct device **pds, size_
static int qcom_pas_alloc_memory_region(struct qcom_pas *pas)
{
- struct reserved_mem *rmem;
- struct device_node *node;
-
- node = of_parse_phandle(pas->dev->of_node, "memory-region", 0);
- if (!node) {
- dev_err(pas->dev, "no memory-region specified\n");
- return -EINVAL;
- }
+ struct resource res;
+ int ret;
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(pas->dev->of_node, 0, &res);
+ if (ret) {
dev_err(pas->dev, "unable to resolve memory-region\n");
- return -EINVAL;
+ return ret;
}
- pas->mem_phys = pas->mem_reloc = rmem->base;
- pas->mem_size = rmem->size;
- pas->mem_region = devm_ioremap_wc(pas->dev, pas->mem_phys, pas->mem_size);
- if (!pas->mem_region) {
- dev_err(pas->dev, "unable to map memory region: %pa+%zx\n",
- &rmem->base, pas->mem_size);
- return -EBUSY;
+ pas->mem_phys = pas->mem_reloc = res.start;
+ pas->mem_size = resource_size(&res);
+ pas->mem_region = devm_ioremap_resource_wc(pas->dev, &res);
+ if (IS_ERR(pas->mem_region)) {
+ dev_err(pas->dev, "unable to map memory region: %pR\n", &res);
+ return PTR_ERR(pas->mem_region);
}
if (!pas->dtb_pas_id)
return 0;
- node = of_parse_phandle(pas->dev->of_node, "memory-region", 1);
- if (!node) {
- dev_err(pas->dev, "no dtb memory-region specified\n");
- return -EINVAL;
- }
-
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(pas->dev->of_node, 1, &res);
+ if (ret) {
dev_err(pas->dev, "unable to resolve dtb memory-region\n");
- return -EINVAL;
+ return ret;
}
- pas->dtb_mem_phys = pas->dtb_mem_reloc = rmem->base;
- pas->dtb_mem_size = rmem->size;
- pas->dtb_mem_region = devm_ioremap_wc(pas->dev, pas->dtb_mem_phys, pas->dtb_mem_size);
- if (!pas->dtb_mem_region) {
- dev_err(pas->dev, "unable to map dtb memory region: %pa+%zx\n",
- &rmem->base, pas->dtb_mem_size);
- return -EBUSY;
+ pas->dtb_mem_phys = pas->dtb_mem_reloc = res.start;
+ pas->dtb_mem_size = resource_size(&res);
+ pas->dtb_mem_region = devm_ioremap_resource_wc(pas->dev, &res);
+ if (IS_ERR(pas->dtb_mem_region)) {
+ dev_err(pas->dev, "unable to map dtb memory region: %pR\n", &res);
+ return PTR_ERR(pas->dtb_mem_region);
}
return 0;
@@ -603,7 +587,6 @@ static int qcom_pas_alloc_memory_region(struct qcom_pas *pas)
static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
{
struct qcom_scm_vmperm perm[MAX_ASSIGN_COUNT];
- struct device_node *node;
unsigned int perm_size;
int offset;
int ret;
@@ -612,17 +595,15 @@ static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
return 0;
for (offset = 0; offset < pas->region_assign_count; ++offset) {
- struct reserved_mem *rmem = NULL;
-
- node = of_parse_phandle(pas->dev->of_node, "memory-region",
- pas->region_assign_idx + offset);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ struct resource res;
+
+ ret = of_reserved_mem_region_to_resource(pas->dev->of_node,
+ pas->region_assign_idx + offset,
+ &res);
+ if (ret) {
dev_err(pas->dev, "unable to resolve shareable memory-region index %d\n",
offset);
- return -EINVAL;
+ return ret;
}
if (pas->region_assign_shared) {
@@ -637,8 +618,8 @@ static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
perm_size = 1;
}
- pas->region_assign_phys[offset] = rmem->base;
- pas->region_assign_size[offset] = rmem->size;
+ pas->region_assign_phys[offset] = res.start;
+ pas->region_assign_size[offset] = resource_size(&res);
pas->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS);
ret = qcom_scm_assign_mem(pas->region_assign_phys[offset],
@@ -1461,7 +1442,7 @@ static const struct of_device_id qcom_pas_of_match[] = {
{ .compatible = "qcom,milos-wpss-pas", .data = &sc7280_wpss_resource},
{ .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
- { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
+ { .compatible = "qcom,msm8974-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
@@ -1488,6 +1469,7 @@ static const struct of_device_id qcom_pas_of_match[] = {
{ .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
{ .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
{ .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
+ { .compatible = "qcom,sdm660-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 07c88623f597..c27200159a88 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -85,7 +85,7 @@
#define TCSR_WCSS_CLK_MASK 0x1F
#define TCSR_WCSS_CLK_ENABLE 0x14
-#define MAX_HALT_REG 3
+#define MAX_HALT_REG 4
enum {
WCSS_IPQ8074,
WCSS_QCS404,
@@ -811,7 +811,8 @@ static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss,
}
}
- wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset");
+ wcss->wcss_q6_bcr_reset = devm_reset_control_get_optional_exclusive(dev,
+ "wcss_q6_bcr_reset");
if (IS_ERR(wcss->wcss_q6_bcr_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n");
return PTR_ERR(wcss->wcss_q6_bcr_reset);
@@ -864,37 +865,32 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
return -EINVAL;
}
- wcss->halt_q6 = halt_reg[0];
- wcss->halt_wcss = halt_reg[1];
- wcss->halt_nc = halt_reg[2];
+ wcss->halt_q6 = halt_reg[1];
+ wcss->halt_wcss = halt_reg[2];
+ wcss->halt_nc = halt_reg[3];
return 0;
}
static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
{
- struct reserved_mem *rmem = NULL;
- struct device_node *node;
struct device *dev = wcss->dev;
+ struct resource res;
+ int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
-
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (ret) {
dev_err(dev, "unable to acquire memory-region\n");
- return -EINVAL;
+ return ret;
}
- wcss->mem_phys = rmem->base;
- wcss->mem_reloc = rmem->base;
- wcss->mem_size = rmem->size;
- wcss->mem_region = devm_ioremap_wc(dev, wcss->mem_phys, wcss->mem_size);
- if (!wcss->mem_region) {
- dev_err(dev, "unable to map memory region: %pa+%pa\n",
- &rmem->base, &rmem->size);
- return -EBUSY;
+ wcss->mem_phys = res.start;
+ wcss->mem_reloc = res.start;
+ wcss->mem_size = resource_size(&res);
+ wcss->mem_region = devm_ioremap_resource_wc(dev, &res);
+ if (IS_ERR(wcss->mem_region)) {
+ dev_err(dev, "unable to map memory region: %pR\n", &res);
+ return PTR_ERR(wcss->mem_region);
}
return 0;
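
One behavioral detail of switching from devm_ioremap_wc() to devm_ioremap_resource_wc(): the former returns NULL on failure, the latter an ERR_PTR() that also carries the reason, which is why the converted checks use IS_ERR()/PTR_ERR(). A minimal sketch of the error handling the converted drivers now rely on (demo_map() is illustrative):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/ioport.h>

	static void __iomem *demo_map(struct device *dev, struct resource *res)
	{
		void __iomem *va;

		va = devm_ioremap_resource_wc(dev, res);
		if (IS_ERR(va)) {
			/* ERR_PTR, never NULL, so a !va check would miss failures */
			dev_err(dev, "map of %pR failed: %ld\n", res, PTR_ERR(va));
			return va;
		}

		return va;
	}
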
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 2c7e519a2254..ee18bf2e8054 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -526,26 +526,21 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
- struct reserved_mem *rmem = NULL;
- struct device_node *node;
-
- node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
+ struct resource res;
+ int ret;
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(wcnss->dev->of_node, 0, &res);
+ if (ret) {
dev_err(wcnss->dev, "unable to resolve memory-region\n");
- return -EINVAL;
+ return ret;
}
- wcnss->mem_phys = wcnss->mem_reloc = rmem->base;
- wcnss->mem_size = rmem->size;
- wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
- if (!wcnss->mem_region) {
- dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
- &rmem->base, wcnss->mem_size);
- return -EBUSY;
+ wcnss->mem_phys = wcnss->mem_reloc = res.start;
+ wcnss->mem_size = resource_size(&res);
+ wcnss->mem_region = devm_ioremap_resource_wc(wcnss->dev, &res);
+ if (IS_ERR(wcnss->mem_region)) {
+ dev_err(wcnss->dev, "unable to map memory region: %pR\n", &res);
+ return PTR_ERR(wcnss->mem_region);
}
return 0;
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
index 921d853594f4..3c25625f966d 100644
--- a/drivers/remoteproc/rcar_rproc.c
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -52,46 +52,36 @@ static int rcar_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
+ int i = 0;
u32 da;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
-
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(&rproc->dev,
- "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ while (1) {
+ struct resource res;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(np, i++, &res);
+ if (ret)
+ return 0;
- if (rmem->base > U32_MAX) {
- of_node_put(it.node);
+ if (res.start > U32_MAX)
return -EINVAL;
- }
/* No need to translate pa to da, R-Car use same map */
- da = rmem->base;
+ da = res.start;
mem = rproc_mem_entry_init(dev, NULL,
- rmem->base,
- rmem->size, da,
+ res.start,
+ resource_size(&res), da,
rcar_rproc_mem_alloc,
rcar_rproc_mem_release,
- it.node->name);
+ res.name);
- if (!mem) {
- of_node_put(it.node);
+ if (!mem)
return -ENOMEM;
- }
rproc_add_carveout(rproc, mem);
}
-
- return 0;
}
static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 825672100528..aada2780b343 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -16,29 +16,25 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <asm/byteorder.h>
#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/panic_notifier.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
#include <linux/dma-mapping.h>
+#include <linux/elf.h>
#include <linux/firmware.h>
-#include <linux/string.h>
-#include <linux/debugfs.h>
-#include <linux/rculist.h>
-#include <linux/remoteproc.h>
-#include <linux/iommu.h>
#include <linux/idr.h>
-#include <linux/elf.h>
-#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
-#include <linux/of_reserved_mem.h>
-#include <linux/virtio_ids.h>
-#include <linux/virtio_ring.h>
-#include <asm/byteorder.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
+#include <linux/rculist.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/virtio_ring.h>
#include "remoteproc_internal.h"
@@ -159,7 +155,6 @@ phys_addr_t rproc_va_to_pa(void *cpu_addr)
WARN_ON(!virt_addr_valid(cpu_addr));
return virt_to_phys(cpu_addr);
}
-EXPORT_SYMBOL(rproc_va_to_pa);
/**
* rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
@@ -1989,7 +1984,7 @@ EXPORT_SYMBOL(rproc_boot);
int rproc_shutdown(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
- int ret = 0;
+ int ret;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index e6566a9839dc..a07edf7217d2 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -120,43 +120,41 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
- struct of_phandle_iterator it;
- int index = 0;
-
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ int entries;
+
+ entries = of_reserved_mem_region_count(np);
+
+ for (int index = 0; index < entries; index++) {
+ struct resource res;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(np, index, &res);
+ if (ret)
+ return ret;
/* No need to map vdev buffer */
- if (strcmp(it.node->name, "vdev0buffer")) {
+ if (!strstarts(res.name, "vdev0buffer")) {
/* Register memory region */
mem = rproc_mem_entry_init(dev, NULL,
- (dma_addr_t)rmem->base,
- rmem->size, rmem->base,
+ (dma_addr_t)res.start,
+ resource_size(&res), res.start,
st_rproc_mem_alloc,
st_rproc_mem_release,
- it.node->name);
+ "%.*s",
+ strchrnul(res.name, '@') - res.name,
+ res.name);
} else {
/* Register reserved memory for vdev buffer allocation */
mem = rproc_of_resm_mem_entry_init(dev, index,
- rmem->size,
- rmem->base,
- it.node->name);
+ resource_size(&res),
+ res.start,
+ "vdev0buffer");
}
- if (!mem) {
- of_node_put(it.node);
+ if (!mem)
return -ENOMEM;
- }
rproc_add_carveout(rproc, mem);
- index++;
}
return rproc_elf_load_rsc_table(rproc, fw);
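
The reserved-mem resource names come back as "label@unit-address", while the carveout names previously taken from it.node->name were just the label, so the conversion trims at the '@' (the precision argument of "%.*s" must be an int, hence the cast). A sketch of that trimming together with the count-based iteration — demo_list_regions() is hypothetical:

	#include <linux/device.h>
	#include <linux/of_reserved_mem.h>
	#include <linux/string.h>

	static int demo_list_regions(struct device *dev)
	{
		int count = of_reserved_mem_region_count(dev->of_node);

		for (int i = 0; i < count; i++) {
			struct resource res;
			int ret = of_reserved_mem_region_to_resource(dev->of_node, i, &res);

			if (ret)
				return ret;

			/* res.name is "label@address"; print only the label part. */
			dev_info(dev, "region %d: %.*s %pR\n", i,
				 (int)(strchrnul(res.name, '@') - res.name),
				 res.name, &res);
		}

		return 0;
	}
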
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 431648607d53..c28679d3b43c 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -213,60 +213,52 @@ static int stm32_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
u64 da;
- int index = 0;
+ int index = 0, mr = 0;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ while (1) {
+ struct resource res;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(np, mr++, &res);
+ if (ret)
+ return 0;
- if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
- of_node_put(it.node);
- dev_err(dev, "memory region not valid %pa\n",
- &rmem->base);
+ if (stm32_rproc_pa_to_da(rproc, res.start, &da) < 0) {
+ dev_err(dev, "memory region not valid %pR\n", &res);
return -EINVAL;
}
/* No need to map vdev buffer */
- if (strcmp(it.node->name, "vdev0buffer")) {
+ if (!strstarts(res.name, "vdev0buffer")) {
/* Register memory region */
mem = rproc_mem_entry_init(dev, NULL,
- (dma_addr_t)rmem->base,
- rmem->size, da,
+ (dma_addr_t)res.start,
+ resource_size(&res), da,
stm32_rproc_mem_alloc,
stm32_rproc_mem_release,
- it.node->name);
-
+ "%.*s", strchrnul(res.name, '@') - res.name,
+ res.name);
if (mem)
rproc_coredump_add_segment(rproc, da,
- rmem->size);
+ resource_size(&res));
} else {
/* Register reserved memory for vdev buffer alloc */
mem = rproc_of_resm_mem_entry_init(dev, index,
- rmem->size,
- rmem->base,
- it.node->name);
+ resource_size(&res),
+ res.start,
+ "vdev0buffer");
}
-		if (!mem) {
-			of_node_put(it.node);
+		if (!mem)
 			return -ENOMEM;
-		}
rproc_add_carveout(rproc, mem);
index++;
}
-
- return 0;
}
static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c
index 56b71652e449..32aa954dc5be 100644
--- a/drivers/remoteproc/ti_k3_common.c
+++ b/drivers/remoteproc/ti_k3_common.c
@@ -470,13 +470,10 @@ int k3_reserved_mem_init(struct k3_rproc *kproc)
{
struct device *dev = kproc->dev;
struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
int num_rmems;
int ret, i;
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
+ num_rmems = of_reserved_mem_region_count(np);
if (num_rmems < 0) {
dev_err(dev, "device does not reserved memory regions (%d)\n",
num_rmems);
@@ -505,23 +502,20 @@ int k3_reserved_mem_init(struct k3_rproc *kproc)
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
+ struct resource res;
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
+ ret = of_reserved_mem_region_to_resource(np, i + 1, &res);
+ if (ret)
+ return ret;
- kproc->rmem[i].bus_addr = rmem->base;
+ kproc->rmem[i].bus_addr = res.start;
/* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ kproc->rmem[i].dev_addr = (u32)res.start;
+ kproc->rmem[i].size = resource_size(&res);
+ kproc->rmem[i].cpu_addr = devm_ioremap_resource_wc(dev, &res);
-		if (!kproc->rmem[i].cpu_addr) {
+		if (IS_ERR(kproc->rmem[i].cpu_addr)) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
+ dev_err(dev, "failed to map reserved memory#%d at %pR\n",
+ i + 1, &res);
-			return -ENOMEM;
+			return PTR_ERR(kproc->rmem[i].cpu_addr);
}
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 0b7b173d0d26..a7b75235f53e 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -492,53 +492,46 @@ static int add_mem_regions_carveout(struct rproc *rproc)
{
struct rproc_mem_entry *rproc_mem;
struct zynqmp_r5_core *r5_core;
- struct of_phandle_iterator it;
- struct reserved_mem *rmem;
int i = 0;
r5_core = rproc->priv;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);
+ while (1) {
+ int err;
+ struct resource res;
- while (of_phandle_iterator_next(&it) == 0) {
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(&rproc->dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ err = of_reserved_mem_region_to_resource(r5_core->np, i, &res);
+ if (err)
+ return 0;
- if (!strcmp(it.node->name, "vdev0buffer")) {
+ if (strstarts(res.name, "vdev0buffer")) {
/* Init reserved memory for vdev buffer */
rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
- rmem->size,
- rmem->base,
- it.node->name);
+ resource_size(&res),
+ res.start,
+ "vdev0buffer");
} else {
/* Register associated reserved memory regions */
rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
- (dma_addr_t)rmem->base,
- rmem->size, rmem->base,
+ (dma_addr_t)res.start,
+ resource_size(&res), res.start,
zynqmp_r5_mem_region_map,
zynqmp_r5_mem_region_unmap,
- it.node->name);
+ "%.*s",
+ strchrnul(res.name, '@') - res.name,
+ res.name);
}
- if (!rproc_mem) {
- of_node_put(it.node);
+ if (!rproc_mem)
return -ENOMEM;
- }
rproc_add_carveout(rproc, rproc_mem);
- rproc_coredump_add_segment(rproc, rmem->base, rmem->size);
+ rproc_coredump_add_segment(rproc, res.start, resource_size(&res));
- dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
- it.node->name, rmem->base, rmem->size);
+ dev_dbg(&rproc->dev, "reserved mem carveout %pR\n", &res);
i++;
}
-
- return 0;
}
static int add_sram_carveouts(struct rproc *rproc)
@@ -808,7 +801,6 @@ static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
struct device *dev = r5_core->dev;
struct rsc_tbl_data *rsc_data_va;
struct resource res_mem;
- struct device_node *np;
int ret;
/*
@@ -818,14 +810,7 @@ static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
* contains that data structure which holds resource table address, size
* and some magic number to validate correct resource table entry.
*/
- np = of_parse_phandle(r5_core->np, "memory-region", 0);
- if (!np) {
- dev_err(dev, "failed to get memory region dev node\n");
- return -EINVAL;
- }
-
- ret = of_address_to_resource(np, 0, &res_mem);
- of_node_put(np);
+ ret = of_reserved_mem_region_to_resource(r5_core->np, 0, &res_mem);
if (ret) {
dev_err(dev, "failed to get memory-region resource addr\n");
return -EINVAL;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 30659115de55..8d5ad0c1b27f 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -73,6 +73,16 @@ config RESET_BRCMSTB_RESCAL
This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
BCM7216 or the BCM2712.
+config RESET_EIC7700
+ bool "Reset controller driver for ESWIN SoCs"
+ depends on ARCH_ESWIN || COMPILE_TEST
+ default ARCH_ESWIN
+ help
+	  This enables the reset controller driver for ESWIN EIC7700 series
+	  SoCs, which provides assert and deassert control for the resets on
+	  the chip. Only enable this option if you are running on such
+	  hardware.
+
config RESET_EYEQ
bool "Mobileye EyeQ reset controller"
depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST
@@ -171,7 +181,7 @@ config RESET_LPC18XX
config RESET_MCHP_SPARX5
tristate "Microchip Sparx5 reset driver"
- depends on ARCH_SPARX5 || SOC_LAN966 || MCHP_LAN966X_PCI || COMPILE_TEST
+ depends on ARCH_SPARX5 || ARCH_LAN969X || SOC_LAN966 || MCHP_LAN966X_PCI || COMPILE_TEST
default y if SPARX5_SWITCH
select MFD_SYSCON
help
@@ -238,6 +248,7 @@ config RESET_RASPBERRYPI
config RESET_RZG2L_USBPHY_CTRL
tristate "Renesas RZ/G2L USBPHY control driver"
depends on ARCH_RZG2L || COMPILE_TEST
+ select MFD_SYSCON
help
Support for USBPHY Control found on RZ/G2L family. It mainly
controls reset and power down of the USB/PHY.
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index f7934f9fb90b..9c3e484dfd81 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
+obj-$(CONFIG_RESET_EIC7700) += reset-eic7700.o
obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o
obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 843cffc93909..0135dd0ae204 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -27,9 +27,6 @@
static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);
-static DEFINE_MUTEX(reset_lookup_mutex);
-static LIST_HEAD(reset_lookup_list);
-
/* Protects reset_gpio_lookup_list */
static DEFINE_MUTEX(reset_gpio_lookup_mutex);
static LIST_HEAD(reset_gpio_lookup_list);
@@ -194,33 +191,6 @@ int devm_reset_controller_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
-/**
- * reset_controller_add_lookup - register a set of lookup entries
- * @lookup: array of reset lookup entries
- * @num_entries: number of entries in the lookup array
- */
-void reset_controller_add_lookup(struct reset_control_lookup *lookup,
- unsigned int num_entries)
-{
- struct reset_control_lookup *entry;
- unsigned int i;
-
- mutex_lock(&reset_lookup_mutex);
- for (i = 0; i < num_entries; i++) {
- entry = &lookup[i];
-
- if (!entry->dev_id || !entry->provider) {
- pr_warn("%s(): reset lookup entry badly specified, skipping\n",
- __func__);
- continue;
- }
-
- list_add_tail(&entry->list, &reset_lookup_list);
- }
- mutex_unlock(&reset_lookup_mutex);
-}
-EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
-
static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc) {
return container_of(rstc, struct reset_control_array, base);
@@ -1103,75 +1073,12 @@ out_put:
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);
-static struct reset_controller_dev *
-__reset_controller_by_name(const char *name)
-{
- struct reset_controller_dev *rcdev;
-
- lockdep_assert_held(&reset_list_mutex);
-
- list_for_each_entry(rcdev, &reset_controller_list, list) {
- if (!rcdev->dev)
- continue;
-
- if (!strcmp(name, dev_name(rcdev->dev)))
- return rcdev;
- }
-
- return NULL;
-}
-
-static struct reset_control *
-__reset_control_get_from_lookup(struct device *dev, const char *con_id,
- enum reset_control_flags flags)
-{
- bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
- const struct reset_control_lookup *lookup;
- struct reset_controller_dev *rcdev;
- const char *dev_id = dev_name(dev);
- struct reset_control *rstc = NULL;
-
- mutex_lock(&reset_lookup_mutex);
-
- list_for_each_entry(lookup, &reset_lookup_list, list) {
- if (strcmp(lookup->dev_id, dev_id))
- continue;
-
- if ((!con_id && !lookup->con_id) ||
- ((con_id && lookup->con_id) &&
- !strcmp(con_id, lookup->con_id))) {
- mutex_lock(&reset_list_mutex);
- rcdev = __reset_controller_by_name(lookup->provider);
- if (!rcdev) {
- mutex_unlock(&reset_list_mutex);
- mutex_unlock(&reset_lookup_mutex);
- /* Reset provider may not be ready yet. */
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- flags &= ~RESET_CONTROL_FLAGS_BIT_OPTIONAL;
-
- rstc = __reset_control_get_internal(rcdev,
- lookup->index,
- flags);
- mutex_unlock(&reset_list_mutex);
- break;
- }
- }
-
- mutex_unlock(&reset_lookup_mutex);
-
- if (!rstc)
- return optional ? NULL : ERR_PTR(-ENOENT);
-
- return rstc;
-}
-
struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, enum reset_control_flags flags)
{
bool shared = flags & RESET_CONTROL_FLAGS_BIT_SHARED;
bool acquired = flags & RESET_CONTROL_FLAGS_BIT_ACQUIRED;
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
if (WARN_ON(shared && acquired))
return ERR_PTR(-EINVAL);
@@ -1179,7 +1086,7 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id,
if (dev->of_node)
return __of_reset_control_get(dev->of_node, id, index, flags);
- return __reset_control_get_from_lookup(dev, id, flags);
+ return optional ? NULL : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(__reset_control_get);
@@ -1514,31 +1421,6 @@ devm_reset_control_array_get(struct device *dev, enum reset_control_flags flags)
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
-static int reset_control_get_count_from_lookup(struct device *dev)
-{
- const struct reset_control_lookup *lookup;
- const char *dev_id;
- int count = 0;
-
- if (!dev)
- return -EINVAL;
-
- dev_id = dev_name(dev);
- mutex_lock(&reset_lookup_mutex);
-
- list_for_each_entry(lookup, &reset_lookup_list, list) {
- if (!strcmp(lookup->dev_id, dev_id))
- count++;
- }
-
- mutex_unlock(&reset_lookup_mutex);
-
- if (count == 0)
- count = -ENOENT;
-
- return count;
-}
-
/**
* reset_control_get_count - Count number of resets available with a device
*
@@ -1552,6 +1434,6 @@ int reset_control_get_count(struct device *dev)
if (dev->of_node)
return of_reset_control_get_count(dev->of_node);
- return reset_control_get_count_from_lookup(dev);
+ return -ENOENT;
}
EXPORT_SYMBOL_GPL(reset_control_get_count);
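
With the lookup table gone, a device without an of_node now simply gets -ENOENT, or NULL when the optional flag is set. A minimal consumer sketch under those semantics — demo_maybe_reset() is illustrative:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int demo_maybe_reset(struct device *dev)
	{
		struct reset_control *rstc;

		/* NULL (a valid no-op control) when no reset line is described. */
		rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		/* The reset_control_*() helpers accept NULL and do nothing. */
		return reset_control_reset(rstc);
	}
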
diff --git a/drivers/reset/reset-eic7700.c b/drivers/reset/reset-eic7700.c
new file mode 100644
index 000000000000..b72283b18b08
--- /dev/null
+++ b/drivers/reset/reset-eic7700.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * ESWIN Reset Driver
+ *
+ * Authors:
+ * Yifeng Huang <huangyifeng@eswincomputing.com>
+ * Xuyang Dong <dongxuyang@eswincomputing.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <dt-bindings/reset/eswin,eic7700-reset.h>
+
+#define SYSCRG_CLEAR_BOOT_INFO_OFFSET 0xC
+#define CLEAR_BOOT_FLAG_BIT BIT(0)
+#define SYSCRG_RESET_OFFSET 0x100
+
+/**
+ * struct eic7700_reset_data - reset controller information structure
+ * @rcdev: reset controller entity
+ * @regmap: regmap handle containing the memory-mapped reset registers
+ */
+struct eic7700_reset_data {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+};
+
+static const struct regmap_config eic7700_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1fc,
+};
+
+struct eic7700_reg {
+ u32 reg;
+ u32 bit;
+};
+
+static inline struct eic7700_reset_data *
+to_eic7700_reset_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct eic7700_reset_data, rcdev);
+}
+
+#define EIC7700_RESET(id, reg, bit) [id] = \
+ { SYSCRG_RESET_OFFSET + (reg) * sizeof(u32), BIT(bit) }
+
+/* mapping table for reset ID to register offset and reset bit */
+static const struct eic7700_reg eic7700_reset[] = {
+ EIC7700_RESET(EIC7700_RESET_NOC_NSP, 0, 0),
+ EIC7700_RESET(EIC7700_RESET_NOC_CFG, 0, 1),
+ EIC7700_RESET(EIC7700_RESET_RNOC_NSP, 0, 2),
+ EIC7700_RESET(EIC7700_RESET_SNOC_TCU, 0, 3),
+ EIC7700_RESET(EIC7700_RESET_SNOC_U84, 0, 4),
+ EIC7700_RESET(EIC7700_RESET_SNOC_PCIE_XSR, 0, 5),
+ EIC7700_RESET(EIC7700_RESET_SNOC_PCIE_XMR, 0, 6),
+ EIC7700_RESET(EIC7700_RESET_SNOC_PCIE_PR, 0, 7),
+ EIC7700_RESET(EIC7700_RESET_SNOC_NPU, 0, 8),
+ EIC7700_RESET(EIC7700_RESET_SNOC_JTAG, 0, 9),
+ EIC7700_RESET(EIC7700_RESET_SNOC_DSP, 0, 10),
+ EIC7700_RESET(EIC7700_RESET_SNOC_DDRC1_P2, 0, 11),
+ EIC7700_RESET(EIC7700_RESET_SNOC_DDRC1_P1, 0, 12),
+ EIC7700_RESET(EIC7700_RESET_SNOC_DDRC0_P2, 0, 13),
+ EIC7700_RESET(EIC7700_RESET_SNOC_DDRC0_P1, 0, 14),
+ EIC7700_RESET(EIC7700_RESET_SNOC_D2D, 0, 15),
+ EIC7700_RESET(EIC7700_RESET_SNOC_AON, 0, 16),
+ EIC7700_RESET(EIC7700_RESET_GPU_AXI, 1, 0),
+ EIC7700_RESET(EIC7700_RESET_GPU_CFG, 1, 1),
+ EIC7700_RESET(EIC7700_RESET_GPU_GRAY, 1, 2),
+ EIC7700_RESET(EIC7700_RESET_GPU_JONES, 1, 3),
+ EIC7700_RESET(EIC7700_RESET_GPU_SPU, 1, 4),
+ EIC7700_RESET(EIC7700_RESET_DSP_AXI, 2, 0),
+ EIC7700_RESET(EIC7700_RESET_DSP_CFG, 2, 1),
+ EIC7700_RESET(EIC7700_RESET_DSP_DIV4, 2, 2),
+ EIC7700_RESET(EIC7700_RESET_DSP_DIV0, 2, 4),
+ EIC7700_RESET(EIC7700_RESET_DSP_DIV1, 2, 5),
+ EIC7700_RESET(EIC7700_RESET_DSP_DIV2, 2, 6),
+ EIC7700_RESET(EIC7700_RESET_DSP_DIV3, 2, 7),
+ EIC7700_RESET(EIC7700_RESET_D2D_AXI, 3, 0),
+ EIC7700_RESET(EIC7700_RESET_D2D_CFG, 3, 1),
+ EIC7700_RESET(EIC7700_RESET_D2D_PRST, 3, 2),
+ EIC7700_RESET(EIC7700_RESET_D2D_RAW_PCS, 3, 4),
+ EIC7700_RESET(EIC7700_RESET_D2D_RX, 3, 5),
+ EIC7700_RESET(EIC7700_RESET_D2D_TX, 3, 6),
+ EIC7700_RESET(EIC7700_RESET_D2D_CORE, 3, 7),
+ EIC7700_RESET(EIC7700_RESET_DDR1_ARST, 4, 0),
+ EIC7700_RESET(EIC7700_RESET_DDR1_TRACE, 4, 6),
+ EIC7700_RESET(EIC7700_RESET_DDR0_ARST, 4, 16),
+ EIC7700_RESET(EIC7700_RESET_DDR_CFG, 4, 21),
+ EIC7700_RESET(EIC7700_RESET_DDR0_TRACE, 4, 22),
+ EIC7700_RESET(EIC7700_RESET_DDR_CORE, 4, 23),
+ EIC7700_RESET(EIC7700_RESET_DDR_PRST, 4, 26),
+ EIC7700_RESET(EIC7700_RESET_TCU_AXI, 5, 0),
+ EIC7700_RESET(EIC7700_RESET_TCU_CFG, 5, 1),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU0, 5, 4),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU1, 5, 5),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU2, 5, 6),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU3, 5, 7),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU4, 5, 8),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU5, 5, 9),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU6, 5, 10),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU7, 5, 11),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU8, 5, 12),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU9, 5, 13),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU10, 5, 14),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU11, 5, 15),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU12, 5, 16),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU13, 5, 17),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU14, 5, 18),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU15, 5, 19),
+ EIC7700_RESET(EIC7700_RESET_TCU_TBU16, 5, 20),
+ EIC7700_RESET(EIC7700_RESET_NPU_AXI, 6, 0),
+ EIC7700_RESET(EIC7700_RESET_NPU_CFG, 6, 1),
+ EIC7700_RESET(EIC7700_RESET_NPU_CORE, 6, 2),
+ EIC7700_RESET(EIC7700_RESET_NPU_E31CORE, 6, 3),
+ EIC7700_RESET(EIC7700_RESET_NPU_E31BUS, 6, 4),
+ EIC7700_RESET(EIC7700_RESET_NPU_E31DBG, 6, 5),
+ EIC7700_RESET(EIC7700_RESET_NPU_LLC, 6, 6),
+ EIC7700_RESET(EIC7700_RESET_HSP_AXI, 7, 0),
+ EIC7700_RESET(EIC7700_RESET_HSP_CFG, 7, 1),
+ EIC7700_RESET(EIC7700_RESET_HSP_POR, 7, 2),
+ EIC7700_RESET(EIC7700_RESET_MSHC0_PHY, 7, 3),
+ EIC7700_RESET(EIC7700_RESET_MSHC1_PHY, 7, 4),
+ EIC7700_RESET(EIC7700_RESET_MSHC2_PHY, 7, 5),
+ EIC7700_RESET(EIC7700_RESET_MSHC0_TXRX, 7, 6),
+ EIC7700_RESET(EIC7700_RESET_MSHC1_TXRX, 7, 7),
+ EIC7700_RESET(EIC7700_RESET_MSHC2_TXRX, 7, 8),
+ EIC7700_RESET(EIC7700_RESET_SATA_ASIC0, 7, 9),
+ EIC7700_RESET(EIC7700_RESET_SATA_OOB, 7, 10),
+ EIC7700_RESET(EIC7700_RESET_SATA_PMALIVE, 7, 11),
+ EIC7700_RESET(EIC7700_RESET_SATA_RBC, 7, 12),
+ EIC7700_RESET(EIC7700_RESET_DMA0, 7, 13),
+ EIC7700_RESET(EIC7700_RESET_HSP_DMA, 7, 14),
+ EIC7700_RESET(EIC7700_RESET_USB0_VAUX, 7, 15),
+ EIC7700_RESET(EIC7700_RESET_USB1_VAUX, 7, 16),
+ EIC7700_RESET(EIC7700_RESET_HSP_SD1_PRST, 7, 17),
+ EIC7700_RESET(EIC7700_RESET_HSP_SD0_PRST, 7, 18),
+ EIC7700_RESET(EIC7700_RESET_HSP_EMMC_PRST, 7, 19),
+ EIC7700_RESET(EIC7700_RESET_HSP_DMA_PRST, 7, 20),
+ EIC7700_RESET(EIC7700_RESET_HSP_SD1_ARST, 7, 21),
+ EIC7700_RESET(EIC7700_RESET_HSP_SD0_ARST, 7, 22),
+ EIC7700_RESET(EIC7700_RESET_HSP_EMMC_ARST, 7, 23),
+ EIC7700_RESET(EIC7700_RESET_HSP_DMA_ARST, 7, 24),
+ EIC7700_RESET(EIC7700_RESET_HSP_ETH1_ARST, 7, 25),
+ EIC7700_RESET(EIC7700_RESET_HSP_ETH0_ARST, 7, 26),
+ EIC7700_RESET(EIC7700_RESET_SATA_ARST, 7, 27),
+ EIC7700_RESET(EIC7700_RESET_PCIE_CFG, 8, 0),
+ EIC7700_RESET(EIC7700_RESET_PCIE_POWEUP, 8, 1),
+ EIC7700_RESET(EIC7700_RESET_PCIE_PERST, 8, 2),
+ EIC7700_RESET(EIC7700_RESET_I2C0, 9, 0),
+ EIC7700_RESET(EIC7700_RESET_I2C1, 9, 1),
+ EIC7700_RESET(EIC7700_RESET_I2C2, 9, 2),
+ EIC7700_RESET(EIC7700_RESET_I2C3, 9, 3),
+ EIC7700_RESET(EIC7700_RESET_I2C4, 9, 4),
+ EIC7700_RESET(EIC7700_RESET_I2C5, 9, 5),
+ EIC7700_RESET(EIC7700_RESET_I2C6, 9, 6),
+ EIC7700_RESET(EIC7700_RESET_I2C7, 9, 7),
+ EIC7700_RESET(EIC7700_RESET_I2C8, 9, 8),
+ EIC7700_RESET(EIC7700_RESET_I2C9, 9, 9),
+ EIC7700_RESET(EIC7700_RESET_FAN, 10, 0),
+ EIC7700_RESET(EIC7700_RESET_PVT0, 11, 0),
+ EIC7700_RESET(EIC7700_RESET_PVT1, 11, 1),
+ EIC7700_RESET(EIC7700_RESET_MBOX0, 12, 0),
+ EIC7700_RESET(EIC7700_RESET_MBOX1, 12, 1),
+ EIC7700_RESET(EIC7700_RESET_MBOX2, 12, 2),
+ EIC7700_RESET(EIC7700_RESET_MBOX3, 12, 3),
+ EIC7700_RESET(EIC7700_RESET_MBOX4, 12, 4),
+ EIC7700_RESET(EIC7700_RESET_MBOX5, 12, 5),
+ EIC7700_RESET(EIC7700_RESET_MBOX6, 12, 6),
+ EIC7700_RESET(EIC7700_RESET_MBOX7, 12, 7),
+ EIC7700_RESET(EIC7700_RESET_MBOX8, 12, 8),
+ EIC7700_RESET(EIC7700_RESET_MBOX9, 12, 9),
+ EIC7700_RESET(EIC7700_RESET_MBOX10, 12, 10),
+ EIC7700_RESET(EIC7700_RESET_MBOX11, 12, 11),
+ EIC7700_RESET(EIC7700_RESET_MBOX12, 12, 12),
+ EIC7700_RESET(EIC7700_RESET_MBOX13, 12, 13),
+ EIC7700_RESET(EIC7700_RESET_MBOX14, 12, 14),
+ EIC7700_RESET(EIC7700_RESET_MBOX15, 12, 15),
+ EIC7700_RESET(EIC7700_RESET_UART0, 13, 0),
+ EIC7700_RESET(EIC7700_RESET_UART1, 13, 1),
+ EIC7700_RESET(EIC7700_RESET_UART2, 13, 2),
+ EIC7700_RESET(EIC7700_RESET_UART3, 13, 3),
+ EIC7700_RESET(EIC7700_RESET_UART4, 13, 4),
+ EIC7700_RESET(EIC7700_RESET_GPIO0, 14, 0),
+ EIC7700_RESET(EIC7700_RESET_GPIO1, 14, 1),
+ EIC7700_RESET(EIC7700_RESET_TIMER, 15, 0),
+ EIC7700_RESET(EIC7700_RESET_SSI0, 16, 0),
+ EIC7700_RESET(EIC7700_RESET_SSI1, 16, 1),
+ EIC7700_RESET(EIC7700_RESET_WDT0, 17, 0),
+ EIC7700_RESET(EIC7700_RESET_WDT1, 17, 1),
+ EIC7700_RESET(EIC7700_RESET_WDT2, 17, 2),
+ EIC7700_RESET(EIC7700_RESET_WDT3, 17, 3),
+ EIC7700_RESET(EIC7700_RESET_LSP_CFG, 18, 0),
+ EIC7700_RESET(EIC7700_RESET_U84_CORE0, 19, 0),
+ EIC7700_RESET(EIC7700_RESET_U84_CORE1, 19, 1),
+ EIC7700_RESET(EIC7700_RESET_U84_CORE2, 19, 2),
+ EIC7700_RESET(EIC7700_RESET_U84_CORE3, 19, 3),
+ EIC7700_RESET(EIC7700_RESET_U84_BUS, 19, 4),
+ EIC7700_RESET(EIC7700_RESET_U84_DBG, 19, 5),
+ EIC7700_RESET(EIC7700_RESET_U84_TRACECOM, 19, 6),
+ EIC7700_RESET(EIC7700_RESET_U84_TRACE0, 19, 8),
+ EIC7700_RESET(EIC7700_RESET_U84_TRACE1, 19, 9),
+ EIC7700_RESET(EIC7700_RESET_U84_TRACE2, 19, 10),
+ EIC7700_RESET(EIC7700_RESET_U84_TRACE3, 19, 11),
+ EIC7700_RESET(EIC7700_RESET_SCPU_CORE, 20, 0),
+ EIC7700_RESET(EIC7700_RESET_SCPU_BUS, 20, 1),
+ EIC7700_RESET(EIC7700_RESET_SCPU_DBG, 20, 2),
+ EIC7700_RESET(EIC7700_RESET_LPCPU_CORE, 21, 0),
+ EIC7700_RESET(EIC7700_RESET_LPCPU_BUS, 21, 1),
+ EIC7700_RESET(EIC7700_RESET_LPCPU_DBG, 21, 2),
+ EIC7700_RESET(EIC7700_RESET_VC_CFG, 22, 0),
+ EIC7700_RESET(EIC7700_RESET_VC_AXI, 22, 1),
+ EIC7700_RESET(EIC7700_RESET_VC_MONCFG, 22, 2),
+ EIC7700_RESET(EIC7700_RESET_JD_CFG, 23, 0),
+ EIC7700_RESET(EIC7700_RESET_JD_AXI, 23, 1),
+ EIC7700_RESET(EIC7700_RESET_JE_CFG, 24, 0),
+ EIC7700_RESET(EIC7700_RESET_JE_AXI, 24, 1),
+ EIC7700_RESET(EIC7700_RESET_VD_CFG, 25, 0),
+ EIC7700_RESET(EIC7700_RESET_VD_AXI, 25, 1),
+ EIC7700_RESET(EIC7700_RESET_VE_AXI, 26, 0),
+ EIC7700_RESET(EIC7700_RESET_VE_CFG, 26, 1),
+ EIC7700_RESET(EIC7700_RESET_G2D_CORE, 27, 0),
+ EIC7700_RESET(EIC7700_RESET_G2D_CFG, 27, 1),
+ EIC7700_RESET(EIC7700_RESET_G2D_AXI, 27, 2),
+ EIC7700_RESET(EIC7700_RESET_VI_AXI, 28, 0),
+ EIC7700_RESET(EIC7700_RESET_VI_CFG, 28, 1),
+ EIC7700_RESET(EIC7700_RESET_VI_DWE, 28, 2),
+ EIC7700_RESET(EIC7700_RESET_DVP, 29, 0),
+ EIC7700_RESET(EIC7700_RESET_ISP0, 30, 0),
+ EIC7700_RESET(EIC7700_RESET_ISP1, 31, 0),
+ EIC7700_RESET(EIC7700_RESET_SHUTTR0, 32, 0),
+ EIC7700_RESET(EIC7700_RESET_SHUTTR1, 32, 1),
+ EIC7700_RESET(EIC7700_RESET_SHUTTR2, 32, 2),
+ EIC7700_RESET(EIC7700_RESET_SHUTTR3, 32, 3),
+ EIC7700_RESET(EIC7700_RESET_SHUTTR4, 32, 4),
+ EIC7700_RESET(EIC7700_RESET_SHUTTR5, 32, 5),
+ EIC7700_RESET(EIC7700_RESET_VO_MIPI, 33, 0),
+ EIC7700_RESET(EIC7700_RESET_VO_PRST, 33, 1),
+ EIC7700_RESET(EIC7700_RESET_VO_HDMI_PRST, 33, 3),
+ EIC7700_RESET(EIC7700_RESET_VO_HDMI_PHY, 33, 4),
+ EIC7700_RESET(EIC7700_RESET_VO_HDMI, 33, 5),
+ EIC7700_RESET(EIC7700_RESET_VO_I2S, 34, 0),
+ EIC7700_RESET(EIC7700_RESET_VO_I2S_PRST, 34, 1),
+ EIC7700_RESET(EIC7700_RESET_VO_AXI, 35, 0),
+ EIC7700_RESET(EIC7700_RESET_VO_CFG, 35, 1),
+ EIC7700_RESET(EIC7700_RESET_VO_DC, 35, 2),
+ EIC7700_RESET(EIC7700_RESET_VO_DC_PRST, 35, 3),
+ EIC7700_RESET(EIC7700_RESET_BOOTSPI_HRST, 36, 0),
+ EIC7700_RESET(EIC7700_RESET_BOOTSPI, 36, 1),
+ EIC7700_RESET(EIC7700_RESET_ANO1, 37, 0),
+ EIC7700_RESET(EIC7700_RESET_ANO0, 38, 0),
+ EIC7700_RESET(EIC7700_RESET_DMA1_ARST, 39, 0),
+ EIC7700_RESET(EIC7700_RESET_DMA1_HRST, 39, 1),
+ EIC7700_RESET(EIC7700_RESET_FPRT, 40, 0),
+ EIC7700_RESET(EIC7700_RESET_HBLOCK, 41, 0),
+ EIC7700_RESET(EIC7700_RESET_SECSR, 42, 0),
+ EIC7700_RESET(EIC7700_RESET_OTP, 43, 0),
+ EIC7700_RESET(EIC7700_RESET_PKA, 44, 0),
+ EIC7700_RESET(EIC7700_RESET_SPACC, 45, 0),
+ EIC7700_RESET(EIC7700_RESET_TRNG, 46, 0),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_0, 48, 0),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_1, 48, 1),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_2, 48, 2),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_3, 48, 3),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_4, 48, 4),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_5, 48, 5),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_6, 48, 6),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_7, 48, 7),
+ EIC7700_RESET(EIC7700_RESET_TIMER0_N, 48, 8),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_0, 49, 0),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_1, 49, 1),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_2, 49, 2),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_3, 49, 3),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_4, 49, 4),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_5, 49, 5),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_6, 49, 6),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_7, 49, 7),
+ EIC7700_RESET(EIC7700_RESET_TIMER1_N, 49, 8),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_0, 50, 0),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_1, 50, 1),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_2, 50, 2),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_3, 50, 3),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_4, 50, 4),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_5, 50, 5),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_6, 50, 6),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_7, 50, 7),
+ EIC7700_RESET(EIC7700_RESET_TIMER2_N, 50, 8),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_0, 51, 0),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_1, 51, 1),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_2, 51, 2),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_3, 51, 3),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_4, 51, 4),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_5, 51, 5),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_6, 51, 6),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_7, 51, 7),
+ EIC7700_RESET(EIC7700_RESET_TIMER3_N, 51, 8),
+ EIC7700_RESET(EIC7700_RESET_RTC, 52, 0),
+ EIC7700_RESET(EIC7700_RESET_MNOC_SNOC_NSP, 53, 0),
+ EIC7700_RESET(EIC7700_RESET_MNOC_VC, 53, 1),
+ EIC7700_RESET(EIC7700_RESET_MNOC_CFG, 53, 2),
+ EIC7700_RESET(EIC7700_RESET_MNOC_HSP, 53, 3),
+ EIC7700_RESET(EIC7700_RESET_MNOC_GPU, 53, 4),
+ EIC7700_RESET(EIC7700_RESET_MNOC_DDRC1_P3, 53, 5),
+ EIC7700_RESET(EIC7700_RESET_MNOC_DDRC0_P3, 53, 6),
+ EIC7700_RESET(EIC7700_RESET_RNOC_VO, 54, 0),
+ EIC7700_RESET(EIC7700_RESET_RNOC_VI, 54, 1),
+ EIC7700_RESET(EIC7700_RESET_RNOC_SNOC_NSP, 54, 2),
+ EIC7700_RESET(EIC7700_RESET_RNOC_CFG, 54, 3),
+ EIC7700_RESET(EIC7700_RESET_MNOC_DDRC1_P4, 54, 4),
+ EIC7700_RESET(EIC7700_RESET_MNOC_DDRC0_P4, 54, 5),
+ EIC7700_RESET(EIC7700_RESET_CNOC_VO_CFG, 55, 0),
+ EIC7700_RESET(EIC7700_RESET_CNOC_VI_CFG, 55, 1),
+ EIC7700_RESET(EIC7700_RESET_CNOC_VC_CFG, 55, 2),
+ EIC7700_RESET(EIC7700_RESET_CNOC_TCU_CFG, 55, 3),
+ EIC7700_RESET(EIC7700_RESET_CNOC_PCIE_CFG, 55, 4),
+ EIC7700_RESET(EIC7700_RESET_CNOC_NPU_CFG, 55, 5),
+ EIC7700_RESET(EIC7700_RESET_CNOC_LSP_CFG, 55, 6),
+ EIC7700_RESET(EIC7700_RESET_CNOC_HSP_CFG, 55, 7),
+ EIC7700_RESET(EIC7700_RESET_CNOC_GPU_CFG, 55, 8),
+ EIC7700_RESET(EIC7700_RESET_CNOC_DSPT_CFG, 55, 9),
+ EIC7700_RESET(EIC7700_RESET_CNOC_DDRT1_CFG, 55, 10),
+ EIC7700_RESET(EIC7700_RESET_CNOC_DDRT0_CFG, 55, 11),
+ EIC7700_RESET(EIC7700_RESET_CNOC_D2D_CFG, 55, 12),
+ EIC7700_RESET(EIC7700_RESET_CNOC_CFG, 55, 13),
+ EIC7700_RESET(EIC7700_RESET_CNOC_CLMM_CFG, 55, 14),
+ EIC7700_RESET(EIC7700_RESET_CNOC_AON_CFG, 55, 15),
+ EIC7700_RESET(EIC7700_RESET_LNOC_CFG, 56, 0),
+ EIC7700_RESET(EIC7700_RESET_LNOC_NPU_LLC, 56, 1),
+ EIC7700_RESET(EIC7700_RESET_LNOC_DDRC1_P0, 56, 2),
+ EIC7700_RESET(EIC7700_RESET_LNOC_DDRC0_P0, 56, 3),
+};
+
+static int eic7700_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct eic7700_reset_data *data = to_eic7700_reset_data(rcdev);
+
+ return regmap_clear_bits(data->regmap, eic7700_reset[id].reg,
+ eic7700_reset[id].bit);
+}
+
+static int eic7700_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct eic7700_reset_data *data = to_eic7700_reset_data(rcdev);
+
+ return regmap_set_bits(data->regmap, eic7700_reset[id].reg,
+ eic7700_reset[id].bit);
+}
+
+static int eic7700_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+
+ ret = eic7700_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(10, 15);
+
+ return eic7700_reset_deassert(rcdev, id);
+}
+
+static const struct reset_control_ops eic7700_reset_ops = {
+ .reset = eic7700_reset_reset,
+ .assert = eic7700_reset_assert,
+ .deassert = eic7700_reset_deassert,
+};
+
+static const struct of_device_id eic7700_reset_dt_ids[] = {
+ { .compatible = "eswin,eic7700-reset", },
+ { /* sentinel */ }
+};
+
+static int eic7700_reset_probe(struct platform_device *pdev)
+{
+ struct eic7700_reset_data *data;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ data->regmap = devm_regmap_init_mmio(dev, base, &eic7700_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "failed to get regmap!\n");
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.ops = &eic7700_reset_ops;
+ data->rcdev.of_node = dev->of_node;
+ data->rcdev.of_reset_n_cells = 1;
+ data->rcdev.dev = dev;
+ data->rcdev.nr_resets = ARRAY_SIZE(eic7700_reset);
+
+	/* clear boot flag so u84 and scpu can be reset by software */
+ regmap_set_bits(data->regmap, SYSCRG_CLEAR_BOOT_INFO_OFFSET,
+ CLEAR_BOOT_FLAG_BIT);
+ msleep(50);
+
+ return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static struct platform_driver eic7700_reset_driver = {
+ .probe = eic7700_reset_probe,
+ .driver = {
+ .name = "eic7700-reset",
+ .of_match_table = eic7700_reset_dt_ids,
+ },
+};
+
+builtin_platform_driver(eic7700_reset_driver);
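
Since the driver implements .reset as an assert/deassert pulse, a consumer can call reset_control_reset() or drive the two phases itself. A hypothetical consumer sketch — demo_pulse() is not part of the patch:

	#include <linux/delay.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int demo_pulse(struct device *dev)
	{
		struct reset_control *rstc;
		int ret;

		rstc = devm_reset_control_get_exclusive(dev, NULL);
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		ret = reset_control_assert(rstc);
		if (ret)
			return ret;

		usleep_range(10, 15);	/* same pulse width the driver uses internally */

		return reset_control_deassert(rstc);
	}
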
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 8a7f167e405e..4ecb9acb2641 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
#define RESET 0x000
#define VBENCTL 0x03c
@@ -91,8 +92,14 @@ static int rzg2l_usbphy_ctrl_status(struct reset_controller_dev *rcdev,
return !!(readl(priv->base + RESET) & port_mask);
}
+#define RZG2L_USBPHY_CTRL_PWRRDY 1
+
static const struct of_device_id rzg2l_usbphy_ctrl_match_table[] = {
{ .compatible = "renesas,rzg2l-usbphy-ctrl" },
+ {
+ .compatible = "renesas,r9a08g045-usbphy-ctrl",
+ .data = (void *)RZG2L_USBPHY_CTRL_PWRRDY
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_usbphy_ctrl_match_table);
@@ -110,6 +117,55 @@ static const struct regmap_config rzg2l_usb_regconf = {
.max_register = 1,
};
+static void rzg2l_usbphy_ctrl_set_pwrrdy(struct regmap_field *pwrrdy,
+ bool power_on)
+{
+ u32 val = power_on ? 0 : 1;
+
+ /* The initialization path guarantees that the mask is 1 bit long. */
+ regmap_field_update_bits(pwrrdy, 1, val);
+}
+
+static void rzg2l_usbphy_ctrl_pwrrdy_off(void *data)
+{
+ rzg2l_usbphy_ctrl_set_pwrrdy(data, false);
+}
+
+static int rzg2l_usbphy_ctrl_pwrrdy_init(struct device *dev)
+{
+ struct regmap_field *pwrrdy;
+ struct reg_field field;
+ struct regmap *regmap;
+ const int *data;
+ u32 args[2];
+
+ data = device_get_match_data(dev);
+ if ((uintptr_t)data != RZG2L_USBPHY_CTRL_PWRRDY)
+ return 0;
+
+ regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "renesas,sysc-pwrrdy",
+ ARRAY_SIZE(args), args);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Don't allow more than one bit in mask. */
+ if (hweight32(args[1]) != 1)
+ return -EINVAL;
+
+ field.reg = args[0];
+ field.lsb = __ffs(args[1]);
+ field.msb = __fls(args[1]);
+
+ pwrrdy = devm_regmap_field_alloc(dev, regmap, field);
+ if (IS_ERR(pwrrdy))
+ return PTR_ERR(pwrrdy);
+
+ rzg2l_usbphy_ctrl_set_pwrrdy(pwrrdy, true);
+
+ return devm_add_action_or_reset(dev, rzg2l_usbphy_ctrl_pwrrdy_off, pwrrdy);
+}
+
static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -132,6 +188,10 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ error = rzg2l_usbphy_ctrl_pwrrdy_init(dev);
+ if (error)
+ return error;
+
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return dev_err_probe(dev, PTR_ERR(priv->rstc),
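
The phandle-args contract assumed here is args[0] = register offset and args[1] = a one-bit mask; __ffs() and __fls() then yield the same bit index for lsb and msb. A self-contained sketch of that conversion — demo_field_from_args() is hypothetical:

	#include <linux/bitops.h>
	#include <linux/errno.h>
	#include <linux/regmap.h>

	static int demo_field_from_args(const u32 args[2], struct reg_field *field)
	{
		/* Reject masks that are empty or wider than one bit. */
		if (hweight32(args[1]) != 1)
			return -EINVAL;

		field->reg = args[0];
		field->lsb = __ffs(args[1]);	/* equals __fls() for a one-bit mask */
		field->msb = __fls(args[1]);

		return 0;
	}
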
diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c
index 14d964a9c6b6..fd32e991c4cb 100644
--- a/drivers/reset/reset-th1520.c
+++ b/drivers/reset/reset-th1520.c
@@ -11,6 +11,85 @@
#include <dt-bindings/reset/thead,th1520-reset.h>
+ /* register offset in RSTGEN_R */
+#define TH1520_BROM_RST_CFG 0x0
+#define TH1520_C910_RST_CFG 0x4
+#define TH1520_CHIP_DBG_RST_CFG 0xc
+#define TH1520_AXI4_CPUSYS2_RST_CFG 0x10
+#define TH1520_X2H_CPUSYS_RST_CFG 0x18
+#define TH1520_AHB2_CPUSYS_RST_CFG 0x1c
+#define TH1520_APB3_CPUSYS_RST_CFG 0x20
+#define TH1520_MBOX0_RST_CFG 0x24
+#define TH1520_MBOX1_RST_CFG 0x28
+#define TH1520_MBOX2_RST_CFG 0x2c
+#define TH1520_MBOX3_RST_CFG 0x30
+#define TH1520_WDT0_RST_CFG 0x34
+#define TH1520_WDT1_RST_CFG 0x38
+#define TH1520_TIMER0_RST_CFG 0x3c
+#define TH1520_TIMER1_RST_CFG 0x40
+#define TH1520_PERISYS_AHB_RST_CFG 0x44
+#define TH1520_PERISYS_APB1_RST_CFG 0x48
+#define TH1520_PERISYS_APB2_RST_CFG 0x4c
+#define TH1520_GMAC0_RST_CFG 0x68
+#define TH1520_UART0_RST_CFG 0x70
+#define TH1520_UART1_RST_CFG 0x74
+#define TH1520_UART2_RST_CFG 0x78
+#define TH1520_UART3_RST_CFG 0x7c
+#define TH1520_UART4_RST_CFG 0x80
+#define TH1520_UART5_RST_CFG 0x84
+#define TH1520_QSPI0_RST_CFG 0x8c
+#define TH1520_QSPI1_RST_CFG 0x90
+#define TH1520_SPI_RST_CFG 0x94
+#define TH1520_I2C0_RST_CFG 0x98
+#define TH1520_I2C1_RST_CFG 0x9c
+#define TH1520_I2C2_RST_CFG 0xa0
+#define TH1520_I2C3_RST_CFG 0xa4
+#define TH1520_I2C4_RST_CFG 0xa8
+#define TH1520_I2C5_RST_CFG 0xac
+#define TH1520_GPIO0_RST_CFG 0xb0
+#define TH1520_GPIO1_RST_CFG 0xb4
+#define TH1520_GPIO2_RST_CFG 0xb8
+#define TH1520_PWM_RST_CFG 0xc0
+#define TH1520_PADCTRL0_APSYS_RST_CFG 0xc4
+#define TH1520_CPU2PERI_X2H_RST_CFG 0xcc
+#define TH1520_CPU2AON_X2H_RST_CFG 0xe4
+#define TH1520_AON2CPU_A2X_RST_CFG 0xfc
+#define TH1520_NPUSYS_AXI_RST_CFG 0x128
+#define TH1520_CPU2VP_X2P_RST_CFG 0x12c
+#define TH1520_CPU2VI_X2H_RST_CFG 0x138
+#define TH1520_BMU_C910_RST_CFG 0x148
+#define TH1520_DMAC_CPUSYS_RST_CFG 0x14c
+#define TH1520_SPINLOCK_RST_CFG 0x178
+#define TH1520_CFG2TEE_X2H_RST_CFG 0x188
+#define TH1520_DSMART_RST_CFG 0x18c
+#define TH1520_GPIO3_RST_CFG 0x1a8
+#define TH1520_I2S_RST_CFG 0x1ac
+#define TH1520_IMG_NNA_RST_CFG 0x1b0
+#define TH1520_PERI_APB3_RST_CFG 0x1dc
+#define TH1520_VP_SUBSYS_RST_CFG 0x1ec
+#define TH1520_PERISYS_APB4_RST_CFG 0x1f8
+#define TH1520_GMAC1_RST_CFG 0x204
+#define TH1520_GMAC_AXI_RST_CFG 0x208
+#define TH1520_PADCTRL1_APSYS_RST_CFG 0x20c
+#define TH1520_VOSYS_AXI_RST_CFG 0x210
+#define TH1520_VOSYS_X2X_RST_CFG 0x214
+#define TH1520_MISC2VP_X2X_RST_CFG 0x218
+#define TH1520_SUBSYS_RST_CFG 0x220
+
+ /* register offset in DSP_REGMAP */
+#define TH1520_DSPSYS_RST_CFG 0x0
+
+ /* register offset in MISCSYS_REGMAP */
+#define TH1520_EMMC_RST_CFG 0x0
+#define TH1520_MISCSYS_AXI_RST_CFG 0x8
+#define TH1520_SDIO0_RST_CFG 0xc
+#define TH1520_SDIO1_RST_CFG 0x10
+#define TH1520_USB3_DRD_RST_CFG 0x14
+
+ /* register offset in VISYS_REGMAP */
+#define TH1520_VISYS_RST_CFG 0x0
+#define TH1520_VISYS_2_RST_CFG 0x4
+
/* register offset in VOSYS_REGMAP */
#define TH1520_GPU_RST_CFG 0x0
#define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0)
@@ -18,6 +97,8 @@
#define TH1520_DSI0_RST_CFG 0x8
#define TH1520_DSI1_RST_CFG 0xc
#define TH1520_HDMI_RST_CFG 0x14
+#define TH1520_AXI4_VO_DW_AXI_RST_CFG 0x18
+#define TH1520_X2H_X4_VOSYS_DW_RST_CFG 0x20
/* register values */
#define TH1520_GPU_SW_GPU_RST BIT(0)
@@ -29,14 +110,27 @@
#define TH1520_HDMI_SW_MAIN_RST BIT(0)
#define TH1520_HDMI_SW_PRST BIT(1)
+ /* register offset in VPSYS_REGMAP */
+#define TH1520_AXIBUS_RST_CFG 0x0
+#define TH1520_FCE_RST_CFG 0x4
+#define TH1520_G2D_RST_CFG 0x8
+#define TH1520_VDEC_RST_CFG 0xc
+#define TH1520_VENC_RST_CFG 0x10
+
+struct th1520_reset_map {
+ u32 bit;
+ u32 reg;
+};
+
struct th1520_reset_priv {
struct reset_controller_dev rcdev;
struct regmap *map;
+ const struct th1520_reset_map *resets;
};
-struct th1520_reset_map {
- u32 bit;
- u32 reg;
+struct th1520_reset_data {
+ const struct th1520_reset_map *resets;
+ size_t num;
};
static const struct th1520_reset_map th1520_resets[] = {
@@ -76,6 +170,681 @@ static const struct th1520_reset_map th1520_resets[] = {
.bit = TH1520_HDMI_SW_PRST,
.reg = TH1520_HDMI_RST_CFG,
},
+ [TH1520_RESET_ID_VOAXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_AXI4_VO_DW_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_VOAXI_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_AXI4_VO_DW_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2H_DPU_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2H_DPU_AHB] = {
+ .bit = BIT(1),
+ .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2H_DPU1_AXI] = {
+ .bit = BIT(2),
+ .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2H_DPU1_AHB] = {
+ .bit = BIT(3),
+ .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG,
+ },
+};
+
+static const struct th1520_reset_map th1520_ap_resets[] = {
+ [TH1520_RESET_ID_BROM] = {
+ .bit = BIT(0),
+ .reg = TH1520_BROM_RST_CFG,
+ },
+ [TH1520_RESET_ID_C910_TOP] = {
+ .bit = BIT(0),
+ .reg = TH1520_C910_RST_CFG,
+ },
+ [TH1520_RESET_ID_NPU] = {
+ .bit = BIT(0),
+ .reg = TH1520_IMG_NNA_RST_CFG,
+ },
+ [TH1520_RESET_ID_WDT0] = {
+ .bit = BIT(0),
+ .reg = TH1520_WDT0_RST_CFG,
+ },
+ [TH1520_RESET_ID_WDT1] = {
+ .bit = BIT(0),
+ .reg = TH1520_WDT1_RST_CFG,
+ },
+ [TH1520_RESET_ID_C910_C0] = {
+ .bit = BIT(1),
+ .reg = TH1520_C910_RST_CFG,
+ },
+ [TH1520_RESET_ID_C910_C1] = {
+ .bit = BIT(2),
+ .reg = TH1520_C910_RST_CFG,
+ },
+ [TH1520_RESET_ID_C910_C2] = {
+ .bit = BIT(3),
+ .reg = TH1520_C910_RST_CFG,
+ },
+ [TH1520_RESET_ID_C910_C3] = {
+ .bit = BIT(4),
+ .reg = TH1520_C910_RST_CFG,
+ },
+ [TH1520_RESET_ID_CHIP_DBG_CORE] = {
+ .bit = BIT(0),
+ .reg = TH1520_CHIP_DBG_RST_CFG,
+ },
+ [TH1520_RESET_ID_CHIP_DBG_AXI] = {
+ .bit = BIT(1),
+ .reg = TH1520_CHIP_DBG_RST_CFG,
+ },
+ [TH1520_RESET_ID_AXI4_CPUSYS2_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_AXI4_CPUSYS2_RST_CFG,
+ },
+ [TH1520_RESET_ID_AXI4_CPUSYS2_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_AXI4_CPUSYS2_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2H_CPUSYS] = {
+ .bit = BIT(0),
+ .reg = TH1520_X2H_CPUSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_AHB2_CPUSYS] = {
+ .bit = BIT(0),
+ .reg = TH1520_AHB2_CPUSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_APB3_CPUSYS] = {
+ .bit = BIT(0),
+ .reg = TH1520_APB3_CPUSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_MBOX0_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_MBOX0_RST_CFG,
+ },
+ [TH1520_RESET_ID_MBOX1_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_MBOX1_RST_CFG,
+ },
+ [TH1520_RESET_ID_MBOX2_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_MBOX2_RST_CFG,
+ },
+ [TH1520_RESET_ID_MBOX3_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_MBOX3_RST_CFG,
+ },
+ [TH1520_RESET_ID_TIMER0_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_TIMER0_RST_CFG,
+ },
+ [TH1520_RESET_ID_TIMER0_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_TIMER0_RST_CFG,
+ },
+ [TH1520_RESET_ID_TIMER1_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_TIMER1_RST_CFG,
+ },
+ [TH1520_RESET_ID_TIMER1_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_TIMER1_RST_CFG,
+ },
+ [TH1520_RESET_ID_PERISYS_AHB] = {
+ .bit = BIT(0),
+ .reg = TH1520_PERISYS_AHB_RST_CFG,
+ },
+ [TH1520_RESET_ID_PERISYS_APB1] = {
+ .bit = BIT(0),
+ .reg = TH1520_PERISYS_APB1_RST_CFG,
+ },
+ [TH1520_RESET_ID_PERISYS_APB2] = {
+ .bit = BIT(0),
+ .reg = TH1520_PERISYS_APB2_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC0_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_GMAC0_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC0_AHB] = {
+ .bit = BIT(1),
+ .reg = TH1520_GMAC0_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC0_CLKGEN] = {
+ .bit = BIT(2),
+ .reg = TH1520_GMAC0_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC0_AXI] = {
+ .bit = BIT(3),
+ .reg = TH1520_GMAC0_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART0_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_UART0_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART0_IF] = {
+ .bit = BIT(1),
+ .reg = TH1520_UART0_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART1_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_UART1_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART1_IF] = {
+ .bit = BIT(1),
+ .reg = TH1520_UART1_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART2_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_UART2_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART2_IF] = {
+ .bit = BIT(1),
+ .reg = TH1520_UART2_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART3_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_UART3_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART3_IF] = {
+ .bit = BIT(1),
+ .reg = TH1520_UART3_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART4_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_UART4_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART4_IF] = {
+ .bit = BIT(1),
+ .reg = TH1520_UART4_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART5_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_UART5_RST_CFG,
+ },
+ [TH1520_RESET_ID_UART5_IF] = {
+ .bit = BIT(1),
+ .reg = TH1520_UART5_RST_CFG,
+ },
+ [TH1520_RESET_ID_QSPI0_IF] = {
+ .bit = BIT(0),
+ .reg = TH1520_QSPI0_RST_CFG,
+ },
+ [TH1520_RESET_ID_QSPI0_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_QSPI0_RST_CFG,
+ },
+ [TH1520_RESET_ID_QSPI1_IF] = {
+ .bit = BIT(0),
+ .reg = TH1520_QSPI1_RST_CFG,
+ },
+ [TH1520_RESET_ID_QSPI1_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_QSPI1_RST_CFG,
+ },
+ [TH1520_RESET_ID_SPI_IF] = {
+ .bit = BIT(0),
+ .reg = TH1520_SPI_RST_CFG,
+ },
+ [TH1520_RESET_ID_SPI_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_SPI_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C0_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_I2C0_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C0_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_I2C0_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C1_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_I2C1_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C1_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_I2C1_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C2_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_I2C2_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C2_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_I2C2_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C3_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_I2C3_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C3_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_I2C3_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C4_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_I2C4_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C4_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_I2C4_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C5_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_I2C5_RST_CFG,
+ },
+ [TH1520_RESET_ID_I2C5_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_I2C5_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO0_DB] = {
+ .bit = BIT(0),
+ .reg = TH1520_GPIO0_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO0_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_GPIO0_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO1_DB] = {
+ .bit = BIT(0),
+ .reg = TH1520_GPIO1_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO1_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_GPIO1_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO2_DB] = {
+ .bit = BIT(0),
+ .reg = TH1520_GPIO2_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO2_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_GPIO2_RST_CFG,
+ },
+ [TH1520_RESET_ID_PWM_COUNTER] = {
+ .bit = BIT(0),
+ .reg = TH1520_PWM_RST_CFG,
+ },
+ [TH1520_RESET_ID_PWM_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_PWM_RST_CFG,
+ },
+ [TH1520_RESET_ID_PADCTRL0_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_PADCTRL0_APSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_CPU2PERI_X2H] = {
+ .bit = BIT(1),
+ .reg = TH1520_CPU2PERI_X2H_RST_CFG,
+ },
+ [TH1520_RESET_ID_CPU2AON_X2H] = {
+ .bit = BIT(0),
+ .reg = TH1520_CPU2AON_X2H_RST_CFG,
+ },
+ [TH1520_RESET_ID_AON2CPU_A2X] = {
+ .bit = BIT(0),
+ .reg = TH1520_AON2CPU_A2X_RST_CFG,
+ },
+ [TH1520_RESET_ID_NPUSYS_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_NPUSYS_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_NPUSYS_AXI_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_NPUSYS_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_CPU2VP_X2P] = {
+ .bit = BIT(0),
+ .reg = TH1520_CPU2VP_X2P_RST_CFG,
+ },
+ [TH1520_RESET_ID_CPU2VI_X2H] = {
+ .bit = BIT(0),
+ .reg = TH1520_CPU2VI_X2H_RST_CFG,
+ },
+ [TH1520_RESET_ID_BMU_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_BMU_C910_RST_CFG,
+ },
+ [TH1520_RESET_ID_BMU_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_BMU_C910_RST_CFG,
+ },
+ [TH1520_RESET_ID_DMAC_CPUSYS_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_DMAC_CPUSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DMAC_CPUSYS_AHB] = {
+ .bit = BIT(1),
+ .reg = TH1520_DMAC_CPUSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_SPINLOCK] = {
+ .bit = BIT(0),
+ .reg = TH1520_SPINLOCK_RST_CFG,
+ },
+ [TH1520_RESET_ID_CFG2TEE] = {
+ .bit = BIT(0),
+ .reg = TH1520_CFG2TEE_X2H_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSMART] = {
+ .bit = BIT(0),
+ .reg = TH1520_DSMART_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO3_DB] = {
+ .bit = BIT(0),
+ .reg = TH1520_GPIO3_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPIO3_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_GPIO3_RST_CFG,
+ },
+ [TH1520_RESET_ID_PERI_I2S] = {
+ .bit = BIT(0),
+ .reg = TH1520_I2S_RST_CFG,
+ },
+ [TH1520_RESET_ID_PERI_APB3] = {
+ .bit = BIT(0),
+ .reg = TH1520_PERI_APB3_RST_CFG,
+ },
+ [TH1520_RESET_ID_PERI2PERI1_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_PERI_APB3_RST_CFG,
+ },
+ [TH1520_RESET_ID_VPSYS_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_VP_SUBSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_PERISYS_APB4] = {
+ .bit = BIT(0),
+ .reg = TH1520_PERISYS_APB4_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC1_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_GMAC1_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC1_AHB] = {
+ .bit = BIT(1),
+ .reg = TH1520_GMAC1_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC1_CLKGEN] = {
+ .bit = BIT(2),
+ .reg = TH1520_GMAC1_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC1_AXI] = {
+ .bit = BIT(3),
+ .reg = TH1520_GMAC1_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_GMAC_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_GMAC_AXI_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_GMAC_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_PADCTRL1_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_PADCTRL1_APSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VOSYS_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_VOSYS_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_VOSYS_AXI_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_VOSYS_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_VOSYS_AXI_X2X] = {
+ .bit = BIT(0),
+ .reg = TH1520_VOSYS_X2X_RST_CFG,
+ },
+ [TH1520_RESET_ID_MISC2VP_X2X] = {
+ .bit = BIT(0),
+ .reg = TH1520_MISC2VP_X2X_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSPSYS] = {
+ .bit = BIT(0),
+ .reg = TH1520_SUBSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VISYS] = {
+ .bit = BIT(1),
+ .reg = TH1520_SUBSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VOSYS] = {
+ .bit = BIT(2),
+ .reg = TH1520_SUBSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VPSYS] = {
+ .bit = BIT(3),
+ .reg = TH1520_SUBSYS_RST_CFG,
+ },
+};
+
+static const struct th1520_reset_map th1520_dsp_resets[] = {
+ [TH1520_RESET_ID_X2X_DSP1] = {
+ .bit = BIT(0),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2X_DSP0] = {
+ .bit = BIT(1),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2X_SLAVE_DSP1] = {
+ .bit = BIT(2),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_X2X_SLAVE_DSP0] = {
+ .bit = BIT(3),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSP0_CORE] = {
+ .bit = BIT(8),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSP0_DEBUG] = {
+ .bit = BIT(9),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSP0_APB] = {
+ .bit = BIT(10),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSP1_CORE] = {
+ .bit = BIT(12),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSP1_DEBUG] = {
+ .bit = BIT(13),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSP1_APB] = {
+ .bit = BIT(14),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSPSYS_APB] = {
+ .bit = BIT(16),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_AXI4_DSPSYS_SLV] = {
+ .bit = BIT(20),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_AXI4_DSPSYS] = {
+ .bit = BIT(24),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_AXI4_DSP_RS] = {
+ .bit = BIT(26),
+ .reg = TH1520_DSPSYS_RST_CFG,
+ },
+};
+
+static const struct th1520_reset_map th1520_misc_resets[] = {
+ [TH1520_RESET_ID_EMMC_SDIO_CLKGEN] = {
+ .bit = BIT(0),
+ .reg = TH1520_EMMC_RST_CFG,
+ },
+ [TH1520_RESET_ID_EMMC] = {
+ .bit = BIT(1),
+ .reg = TH1520_EMMC_RST_CFG,
+ },
+ [TH1520_RESET_ID_MISCSYS_AXI] = {
+ .bit = BIT(0),
+ .reg = TH1520_MISCSYS_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_MISCSYS_AXI_APB] = {
+ .bit = BIT(1),
+ .reg = TH1520_MISCSYS_AXI_RST_CFG,
+ },
+ [TH1520_RESET_ID_SDIO0] = {
+ .bit = BIT(0),
+ .reg = TH1520_SDIO0_RST_CFG,
+ },
+ [TH1520_RESET_ID_SDIO1] = {
+ .bit = BIT(1),
+ .reg = TH1520_SDIO1_RST_CFG,
+ },
+ [TH1520_RESET_ID_USB3_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_USB3_DRD_RST_CFG,
+ },
+ [TH1520_RESET_ID_USB3_PHY] = {
+ .bit = BIT(1),
+ .reg = TH1520_USB3_DRD_RST_CFG,
+ },
+ [TH1520_RESET_ID_USB3_VCC] = {
+ .bit = BIT(2),
+ .reg = TH1520_USB3_DRD_RST_CFG,
+ },
+};
+
+static const struct th1520_reset_map th1520_vi_resets[] = {
+ [TH1520_RESET_ID_ISP0] = {
+ .bit = BIT(0),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_ISP1] = {
+ .bit = BIT(4),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_CSI0_APB] = {
+ .bit = BIT(16),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_CSI1_APB] = {
+ .bit = BIT(17),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_CSI2_APB] = {
+ .bit = BIT(18),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_MIPI_FIFO] = {
+ .bit = BIT(20),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_ISP_VENC_APB] = {
+ .bit = BIT(24),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VIPRE_APB] = {
+ .bit = BIT(28),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VIPRE_AXI] = {
+ .bit = BIT(29),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_DW200_APB] = {
+ .bit = BIT(31),
+ .reg = TH1520_VISYS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VISYS3_AXI] = {
+ .bit = BIT(8),
+ .reg = TH1520_VISYS_2_RST_CFG,
+ },
+ [TH1520_RESET_ID_VISYS2_AXI] = {
+ .bit = BIT(9),
+ .reg = TH1520_VISYS_2_RST_CFG,
+ },
+ [TH1520_RESET_ID_VISYS1_AXI] = {
+ .bit = BIT(10),
+ .reg = TH1520_VISYS_2_RST_CFG,
+ },
+ [TH1520_RESET_ID_VISYS_AXI] = {
+ .bit = BIT(12),
+ .reg = TH1520_VISYS_2_RST_CFG,
+ },
+ [TH1520_RESET_ID_VISYS_APB] = {
+ .bit = BIT(16),
+ .reg = TH1520_VISYS_2_RST_CFG,
+ },
+ [TH1520_RESET_ID_ISP_VENC_AXI] = {
+ .bit = BIT(20),
+ .reg = TH1520_VISYS_2_RST_CFG,
+ },
+};
+
+static const struct th1520_reset_map th1520_vp_resets[] = {
+ [TH1520_RESET_ID_VPSYS_AXI_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_AXIBUS_RST_CFG,
+ },
+ [TH1520_RESET_ID_VPSYS_AXI] = {
+ .bit = BIT(1),
+ .reg = TH1520_AXIBUS_RST_CFG,
+ },
+ [TH1520_RESET_ID_FCE_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_FCE_RST_CFG,
+ },
+ [TH1520_RESET_ID_FCE_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_FCE_RST_CFG,
+ },
+ [TH1520_RESET_ID_FCE_X2X_MASTER] = {
+ .bit = BIT(4),
+ .reg = TH1520_FCE_RST_CFG,
+ },
+ [TH1520_RESET_ID_FCE_X2X_SLAVE] = {
+ .bit = BIT(5),
+ .reg = TH1520_FCE_RST_CFG,
+ },
+ [TH1520_RESET_ID_G2D_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_G2D_RST_CFG,
+ },
+ [TH1520_RESET_ID_G2D_ACLK] = {
+ .bit = BIT(1),
+ .reg = TH1520_G2D_RST_CFG,
+ },
+ [TH1520_RESET_ID_G2D_CORE] = {
+ .bit = BIT(2),
+ .reg = TH1520_G2D_RST_CFG,
+ },
+ [TH1520_RESET_ID_VDEC_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_VDEC_RST_CFG,
+ },
+ [TH1520_RESET_ID_VDEC_ACLK] = {
+ .bit = BIT(1),
+ .reg = TH1520_VDEC_RST_CFG,
+ },
+ [TH1520_RESET_ID_VDEC_CORE] = {
+ .bit = BIT(2),
+ .reg = TH1520_VDEC_RST_CFG,
+ },
+ [TH1520_RESET_ID_VENC_APB] = {
+ .bit = BIT(0),
+ .reg = TH1520_VENC_RST_CFG,
+ },
+ [TH1520_RESET_ID_VENC_CORE] = {
+ .bit = BIT(1),
+ .reg = TH1520_VENC_RST_CFG,
+ },
};
static inline struct th1520_reset_priv *
@@ -90,7 +859,7 @@ static int th1520_reset_assert(struct reset_controller_dev *rcdev,
struct th1520_reset_priv *priv = to_th1520_reset(rcdev);
const struct th1520_reset_map *reset;
- reset = &th1520_resets[id];
+ reset = &priv->resets[id];
return regmap_update_bits(priv->map, reset->reg, reset->bit, 0);
}
@@ -101,7 +870,7 @@ static int th1520_reset_deassert(struct reset_controller_dev *rcdev,
struct th1520_reset_priv *priv = to_th1520_reset(rcdev);
const struct th1520_reset_map *reset;
- reset = &th1520_resets[id];
+ reset = &priv->resets[id];
return regmap_update_bits(priv->map, reset->reg, reset->bit,
reset->bit);
@@ -120,11 +889,14 @@ static const struct regmap_config th1520_reset_regmap_config = {
static int th1520_reset_probe(struct platform_device *pdev)
{
+ const struct th1520_reset_data *data;
struct device *dev = &pdev->dev;
struct th1520_reset_priv *priv;
void __iomem *base;
int ret;
+ data = device_get_match_data(dev);
+
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -138,22 +910,61 @@ static int th1520_reset_probe(struct platform_device *pdev)
if (IS_ERR(priv->map))
return PTR_ERR(priv->map);
- /* Initialize GPU resets to asserted state */
- ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG,
- TH1520_GPU_RST_CFG_MASK, 0);
- if (ret)
- return ret;
+ if (of_device_is_compatible(dev->of_node, "thead,th1520-reset")) {
+ /* Initialize GPU resets to asserted state */
+ ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG,
+ TH1520_GPU_RST_CFG_MASK, 0);
+ if (ret)
+ return ret;
+ }
priv->rcdev.owner = THIS_MODULE;
- priv->rcdev.nr_resets = ARRAY_SIZE(th1520_resets);
+ priv->rcdev.nr_resets = data->num;
priv->rcdev.ops = &th1520_reset_ops;
priv->rcdev.of_node = dev->of_node;
+ priv->resets = data->resets;
+
return devm_reset_controller_register(dev, &priv->rcdev);
}
+static const struct th1520_reset_data th1520_reset_data = {
+ .resets = th1520_resets,
+ .num = ARRAY_SIZE(th1520_resets),
+};
+
+static const struct th1520_reset_data th1520_ap_reset_data = {
+ .resets = th1520_ap_resets,
+ .num = ARRAY_SIZE(th1520_ap_resets),
+};
+
+static const struct th1520_reset_data th1520_dsp_reset_data = {
+ .resets = th1520_dsp_resets,
+ .num = ARRAY_SIZE(th1520_dsp_resets),
+};
+
+static const struct th1520_reset_data th1520_misc_reset_data = {
+ .resets = th1520_misc_resets,
+ .num = ARRAY_SIZE(th1520_misc_resets),
+};
+
+static const struct th1520_reset_data th1520_vi_reset_data = {
+ .resets = th1520_vi_resets,
+ .num = ARRAY_SIZE(th1520_vi_resets),
+};
+
+static const struct th1520_reset_data th1520_vp_reset_data = {
+ .resets = th1520_vp_resets,
+ .num = ARRAY_SIZE(th1520_vp_resets),
+};
+
static const struct of_device_id th1520_reset_match[] = {
- { .compatible = "thead,th1520-reset" },
+ { .compatible = "thead,th1520-reset", .data = &th1520_reset_data },
+ { .compatible = "thead,th1520-reset-ap", .data = &th1520_ap_reset_data },
+ { .compatible = "thead,th1520-reset-dsp", .data = &th1520_dsp_reset_data },
+ { .compatible = "thead,th1520-reset-misc", .data = &th1520_misc_reset_data },
+ { .compatible = "thead,th1520-reset-vi", .data = &th1520_vi_reset_data },
+ { .compatible = "thead,th1520-reset-vp", .data = &th1520_vp_reset_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, th1520_reset_match);
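The probe rework above keys the reset table off the matched of_device_id entry rather than a single global array. A minimal sketch of that pattern, with hypothetical my_* names (only device_get_match_data() and the of_device_id .data plumbing are the real APIs):

	#include <linux/platform_device.h>
	#include <linux/property.h>

	struct my_reset_map { u32 reg; u32 bit; };

	struct my_match_data {
		const struct my_reset_map *resets;
		unsigned int num;
	};

	static int my_probe(struct platform_device *pdev)
	{
		/* returns the .data of the of_device_id entry that matched */
		const struct my_match_data *data =
			device_get_match_data(&pdev->dev);

		if (!data)
			return -EINVAL;
		/* size the controller from data->num, index data->resets */
		return 0;
	}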
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 820a6ca5b1d7..5ea096acc858 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1395,6 +1395,19 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
return 0;
}
+static void qcom_glink_remove_rpmsg_device(struct qcom_glink *glink, struct glink_channel *channel)
+{
+ struct rpmsg_channel_info chinfo;
+
+ if (channel->rpdev) {
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+ rpmsg_unregister_device(glink->dev, &chinfo);
+ }
+ channel->rpdev = NULL;
+}
+
static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
struct glink_channel *channel = to_glink_channel(ept);
@@ -1406,7 +1419,7 @@ static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
spin_unlock_irqrestore(&channel->recv_lock, flags);
/* Decouple the potential rpdev from the channel */
- channel->rpdev = NULL;
+ qcom_glink_remove_rpmsg_device(glink, channel);
qcom_glink_send_close_req(glink, channel);
}
@@ -1697,7 +1710,6 @@ free_channel:
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
- struct rpmsg_channel_info chinfo;
struct glink_channel *channel;
unsigned long flags;
@@ -1713,14 +1725,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
/* cancel pending rx_done work */
cancel_work_sync(&channel->intent_work);
- if (channel->rpdev) {
- strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
- chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = RPMSG_ADDR_ANY;
-
- rpmsg_unregister_device(glink->dev, &chinfo);
- }
- channel->rpdev = NULL;
+ qcom_glink_remove_rpmsg_device(glink, channel);
qcom_glink_send_close_ack(glink, channel);
@@ -1734,7 +1739,6 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
- struct rpmsg_channel_info chinfo;
struct glink_channel *channel;
unsigned long flags;
@@ -1756,14 +1760,7 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
spin_unlock_irqrestore(&glink->idr_lock, flags);
/* Decouple the potential rpdev from the channel */
- if (channel->rpdev) {
- strscpy(chinfo.name, channel->name, sizeof(chinfo.name));
- chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = RPMSG_ADDR_ANY;
-
- rpmsg_unregister_device(glink->dev, &chinfo);
- }
- channel->rpdev = NULL;
+ qcom_glink_remove_rpmsg_device(glink, channel);
kref_put(&channel->refcount, qcom_glink_channel_release);
}
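One detail the consolidation fixes in passing: the rx_close_ack copy used strscpy() while the new helper standardizes on strscpy_pad(), which zero-fills the tail of the fixed-size name field. A small sketch of the difference (illustrative buffer):

	char name[32];

	strscpy(name, "glink", sizeof(name));     /* name[6..31] left as-is */
	strscpy_pad(name, "glink", sizeof(name)); /* name[6..31] zeroed */

Padding matters when the whole buffer may later be copied or compared as a unit, as with rpmsg_channel_info.name.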
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index ea66196ef7c7..82c6e7c7cdaf 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -242,7 +242,7 @@ static int aac_queuecommand(struct Scsi_Host *shost,
{
aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
- return aac_scsi_cmd(cmd) ? FAILED : 0;
+ return aac_scsi_cmd(cmd) ? SCSI_MLQUEUE_HOST_BUSY : 0;
}
/**
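For context on the aacraid change: .queuecommand() must return 0 or one of the SCSI_MLQUEUE_*_BUSY codes; FAILED belongs to the error-handler API, and returning it from the queue path is misinterpreted by the midlayer. A hedged sketch of the contract (my_send() is hypothetical):

	static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		if (my_send(cmd))
			/* transient failure: midlayer requeues and retries */
			return SCSI_MLQUEUE_HOST_BUSY;
		/* accepted: completion is signalled later via scsi_done(cmd) */
		return 0;
	}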
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 063e1b5818d3..06223b5ee6da 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2401,8 +2401,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %d, host_no %d,\n",
- scsi_host_busy(s), s->host_no);
+ printk(" host_no %d,\n", s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index adf3d9145606..95f3620059f7 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -882,6 +882,9 @@ static void asd_pci_remove(struct pci_dev *dev)
asd_disable_ints(asd_ha);
+ /* Ensure all scheduled tasklets complete before freeing resources */
+ tasklet_kill(&asd_ha->seq.dl_tasklet);
+
asd_remove_dev_attrs(asd_ha);
/* XXX more here as needed */
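The tasklet_kill() above closes a use-after-free window: it waits until any scheduled or running instance has finished and removes it from the pending list. The teardown ordering, sketched with the driver's names plus an illustrative final free:

	asd_disable_ints(asd_ha);               /* 1. stop new scheduling   */
	tasklet_kill(&asd_ha->seq.dl_tasklet);  /* 2. wait out in-flight    */
	/* 3. only now release anything the tasklet dereferences, e.g.: */
	kfree(resources);                       /* hypothetical             */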
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index dc88bc46dcc0..a0e794ffc980 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5633,7 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
+ phba->wq = alloc_workqueue("beiscsi_%02x_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
phba->shost->host_no);
if (!phba->wq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
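The WQ_PERCPU additions here and in the hunks below read as preparation for alloc_workqueue() defaulting to unbound workers: queues that relied on the historical per-CPU default get the flag spelled out so behavior is preserved. A sketch of the two spellings, under that assumption:

	/* per-CPU workers, rescuer for reclaim paths, one in-flight item */
	wq = alloc_workqueue("mydrv_%d", WQ_MEM_RECLAIM | WQ_PERCPU, 1, id);

	/* explicitly unbound: workers may run on any CPU */
	wq = alloc_workqueue("mydrv", WQ_UNBOUND, 0);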
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 58da993251e9..0f68739d380a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2695,7 +2695,7 @@ static int __init bnx2fc_mod_init(void)
if (rc)
goto detach_ft;
- bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ bnx2fc_wq = alloc_workqueue("bnx2fc", WQ_PERCPU, 0);
if (!bnx2fc_wq) {
rc = -ENOMEM;
goto release_bt;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1bf5948d1188..6fd89ae33059 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1300,7 +1300,7 @@ static int __init alua_init(void)
{
int r;
- kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+ kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!kaluad_wq)
return -ENOMEM;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 4912087de10d..c8c5dfb3ba9a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2438,7 +2438,7 @@ static int __init fcoe_init(void)
unsigned int cpu;
int rc = 0;
- fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ fcoe_wq = alloc_workqueue("fcoe", WQ_PERCPU, 0);
if (!fcoe_wq)
return -ENOMEM;
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 763475587b7f..9801e5fbb0dd 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -134,7 +134,6 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->luns_per_tgt));
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
- c->intr_timer_type = c->intr_timer_type;
/* for older firmware, GET_CONFIG will not return anything */
if (c->wq_copy_count == 0)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 17173239301e..1b3fbd328277 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -231,6 +231,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
+ if (shost->nr_reserved_cmds && !sht->queue_reserved_command) {
+ shost_printk(KERN_ERR, shost,
+ "nr_reserved_cmds set but no method to queue\n");
+ goto fail;
+ }
+
/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
@@ -307,6 +313,14 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto out_del_dev;
+ if (shost->nr_reserved_cmds) {
+ shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
+ if (!shost->pseudo_sdev) {
+ error = -ENOMEM;
+ goto out_del_dev;
+ }
+ }
+
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
@@ -436,6 +450,7 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
shost->hostt = sht;
shost->this_id = sht->this_id;
shost->can_queue = sht->can_queue;
+ shost->nr_reserved_cmds = sht->nr_reserved_cmds;
shost->sg_tablesize = sht->sg_tablesize;
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
shost->cmd_per_lun = sht->cmd_per_lun;
@@ -604,8 +619,8 @@ static bool scsi_host_check_in_flight(struct request *rq, void *data)
}
/**
- * scsi_host_busy - Return the host busy counter
- * @shost: Pointer to Scsi_Host to inc.
+ * scsi_host_busy - Return the count of in-flight commands
+ * @shost: Pointer to Scsi_Host
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
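Putting the new hosts.c pieces together: a driver opting into reserved commands must set nr_reserved_cmds and supply the queue method, or registration now fails early. A hypothetical template (my_* names are illustrative; the two reserved-command fields are the ones added above):

	static const struct scsi_host_template my_sht = {
		.name                   = "mydrv",
		.queuecommand           = my_queuecommand,
		.nr_reserved_cmds       = 2,   /* carved from the tag space */
		.queue_reserved_command = my_queue_reserved,
		.can_queue              = 32,
		.this_id                = -1,
	};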
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 5a3787f27369..f259746bc804 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3533,7 +3533,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
init_completion(&vscsi->wait_idle);
init_completion(&vscsi->unconfig);
- vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
+ vscsi->work_q = alloc_workqueue("ibmvscsis%s",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
dev_name(&vdev->dev));
if (!vscsi->work_q) {
rc = -ENOMEM;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index f96633fa6939..d05d09c1263d 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -85,15 +85,17 @@ struct isci_tmf {
struct completion *complete;
enum sas_protocol proto;
+ unsigned char lun[8];
+ u16 io_tag;
+ enum isci_tmf_function_codes tmf_code;
+ int status;
+
+ /* Must be last --ends in a flexible-array member. */
union {
struct ssp_response_iu resp_iu;
struct dev_to_host_fis d2h_fis;
u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
} resp;
- unsigned char lun[8];
- u16 io_tag;
- enum isci_tmf_function_codes tmf_code;
- int status;
};
static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
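The isci_tmf reordering follows from a C rule: a structure whose last member is a flexible array (here, via ssp_response_iu inside the union) may only appear as the final member of an enclosing aggregate. In miniature:

	struct msg {
		u32 len;
		u8  data[];      /* flexible array member */
	};

	struct bad {
		struct msg m;    /* rejected/warned: flex array not at end */
		int status;
	};

	struct good {
		int status;
		struct msg m;    /* fine: flexible tail closes the struct */
	};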
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 224edacf2d8e..689793d03c20 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -311,7 +311,6 @@ struct lpfc_defer_flogi_acc {
u16 rx_id;
u16 ox_id;
struct lpfc_nodelist *ndlp;
-
};
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
@@ -634,6 +633,7 @@ struct lpfc_vport {
#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
+#define FC_CT_RSPNI_PNI 0x40 /* RSPNI_PNI accepted by switch */
struct list_head fc_nodes;
spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
@@ -1078,6 +1078,8 @@ struct lpfc_hba {
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
+ unsigned long pni; /* 64-bit Platform Name Identifier */
+
uint8_t wwnn[8];
uint8_t wwpn[8];
uint32_t RandomData[7];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f93f8dca65bd..d3caac394291 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1743,6 +1743,28 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
static void
+lpfc_cmpl_ct_cmd_rspni_pni(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *ctrsp;
+ u32 ulp_status;
+
+ vport = cmdiocb->vport;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status == IOSTAT_SUCCESS) {
+ outp = cmdiocb->rsp_dmabuf;
+ ctrsp = (struct lpfc_sli_ct_request *)outp->virt;
+ if (be16_to_cpu(ctrsp->CommandResponse.bits.CmdRsp) ==
+ SLI_CT_RESPONSE_FS_ACC)
+ vport->ct_flags |= FC_CT_RSPNI_PNI;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+}
+
+static void
lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
@@ -1956,6 +1978,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSPNI_PNI)
+ bpl->tus.f.bdeSize = RSPNI_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_DA_ID)
bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -2077,6 +2101,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
CtReq->un.rsnn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
break;
+ case SLI_CTNS_RSPNI_PNI:
+ vport->ct_flags &= ~FC_CT_RSPNI_PNI;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RSPNI_PNI);
+ CtReq->un.rspni.pni = cpu_to_be64(phba->pni);
+ scnprintf(CtReq->un.rspni.symbname,
+ sizeof(CtReq->un.rspni.symbname), "OS Host Name::%s",
+ phba->os_host_name);
+ CtReq->un.rspni.len = strnlen(CtReq->un.rspni.symbname,
+ sizeof(CtReq->un.rspni.symbname));
+ cmpl = lpfc_cmpl_ct_cmd_rspni_pni;
+ break;
case SLI_CTNS_DA_ID:
/* Implement DA_ID Nameserver request */
CtReq->CommandResponse.bits.CmdRsp =
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 3d47dc7458d1..51cb8571c049 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -208,6 +208,7 @@ enum lpfc_nlp_flag {
NPR list */
NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */
NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */
+ NLP_FLOGI_DFR_ACC = 28, /* FLOGI LS_ACC was Deferred */
NLP_SC_REQ = 29, /* Target requires authentication */
NLP_FIRSTBURST = 30, /* Target supports FirstBurst */
NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b71db7d7d747..02b6d31b9ad9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -650,8 +650,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
sp->cmn.bbRcvSizeLsb;
@@ -934,10 +932,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
- * trigger the release of the node
+ * trigger the release of the node. Make sure the ndlp
+ * is marked NLP_DROPPED.
*/
- if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
+ !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ set_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
+ }
goto out;
}
@@ -995,9 +998,10 @@ stop_rr_fcf_flogi:
IOERR_LOOP_OPEN_FAILURE)))
lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
"2858 FLOGI Status:x%x/x%x TMO"
- ":x%x Data x%lx x%x\n",
+ ":x%x Data x%lx x%x x%lx x%x\n",
ulp_status, ulp_word4, tmo,
- phba->hba_flag, phba->fcf.fcf_flag);
+ phba->hba_flag, phba->fcf.fcf_flag,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -1015,14 +1019,17 @@ stop_rr_fcf_flogi:
* reference to trigger node release.
*/
if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
- !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ set_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
+ }
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI Status:x%x/x%x "
- "xri x%x TMO:x%x refcnt %d\n",
+ "xri x%x iotag x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
- tmo, kref_read(&ndlp->kref));
+ cmdiocb->iotag, tmo, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
@@ -1279,6 +1286,19 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t tmo, did;
int rc;
+ /* It's possible for lpfc to reissue a FLOGI on an ndlp that is marked
+ * NLP_DROPPED. This happens when the FLOGI completed with the XB bit
+ * set causing lpfc to reference the ndlp until the XRI_ABORTED CQE is
+ * issued. The time window for the XRI_ABORTED CQE can be as much as
+ * 2*2*RA_TOV allowing for ndlp reuse of this type when the link is
+ * cycling quickly. When true, restore the initial reference and remove
+ * the NLP_DROPPED flag as lpfc is retrying.
+ */
+ if (test_and_clear_bit(NLP_DROPPED, &ndlp->nlp_flag)) {
+ if (!lpfc_nlp_get(ndlp))
+ return 1;
+ }
+
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_FLOGI);
@@ -1334,6 +1354,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Can't do SLI4 class2 without support sequence coalescing */
sp->cls2.classValid = 0;
sp->cls2.seqDelivery = 0;
+
+ /* Fill out Auxiliary Parameter Data */
+ if (phba->pni) {
+ sp->aux.flags =
+ AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+ sp->aux.pni = cpu_to_be64(phba->pni);
+ sp->aux.npiv_cnt = cpu_to_be16(phba->max_vpi - 1);
+ }
} else {
/* Historical, setting sequential-delivery bit for SLI3 */
sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
@@ -1413,11 +1441,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->defer_flogi_acc.ox_id;
}
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc.rx_id,
- phba->defer_flogi_acc.ox_id, phba->hba_flag);
+ /* The LS_ACC completion needs to drop the initial reference.
+ * This is a special case for Pt2Pt because both FLOGIs need
+ * to complete and lpfc defers the LS_ACC when the remote
+ * FLOGI arrives before the driver's FLOGI.
+ */
+ set_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag);
/* Send deferred FLOGI ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
@@ -1433,6 +1462,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->defer_flogi_acc.ndlp = NULL;
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, ndlp x%px hba_flag x%lx\n",
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id,
+ phba->defer_flogi_acc.ndlp,
+ phba->hba_flag);
+
vport->fc_myDID = did;
}
@@ -2248,7 +2285,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
- sp->cmn.bbRcvSizeMsb &= 0xF;
+ if (!test_bit(FC_PT2PT, &vport->fc_flag))
+ sp->cmn.bbRcvSizeMsb &= 0xF;
/* Check if the destination port supports VMID */
ndlp->vmid_support = 0;
@@ -2367,7 +2405,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mode = KERN_INFO;
/* Warn PRLI status */
- lpfc_printf_vlog(vport, mode, LOG_ELS,
+ lpfc_vlog_msg(vport, mode, LOG_ELS,
"2754 PRLI DID:%06X Status:x%x/x%x, "
"data: x%x x%x x%lx\n",
ndlp->nlp_DID, ulp_status,
@@ -3024,6 +3062,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ulp_status,
ulp_word4);
+ /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
skip_recovery = 1;
}
@@ -3262,7 +3301,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
return -ENOMEM;
}
rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
- (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
+ (u8 *)&ns_ndlp->fc_sparam, mbox, fc_ndlp->nlp_rpi);
if (rc) {
rc = -EACCES;
goto out;
@@ -3306,7 +3345,8 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
*
* This routine is a generic completion callback function for Discovery ELS cmd.
* Currently used by the ELS command issuing routines for the ELS State Change
- * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
+ * Request (SCR), lpfc_issue_els_scr(), Exchange Diagnostic Capabilities (EDC),
+ * lpfc_issue_els_edc() and the ELS RDF, lpfc_issue_els_rdf().
* These commands will be retried once only for ELS timeout errors.
**/
static void
@@ -3379,11 +3419,21 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
return;
}
+
if (ulp_status) {
/* ELS discovery cmd completes with error */
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
"4203 ELS cmd x%x error: x%x x%X\n", cmd,
ulp_status, ulp_word4);
+
+ /* In the case where the ELS cmd completes with an error and
+ * the node does not have RPI registered, the node is
+ * outstanding and should put its initial reference.
+ */
+ if ((cmd == ELS_CMD_SCR || cmd == ELS_CMD_RDF) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
+ !test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
goto out;
}
@@ -3452,6 +3502,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
uint8_t *pcmd;
uint16_t cmdsize;
struct lpfc_nodelist *ndlp;
+ bool node_created = false;
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
@@ -3461,21 +3512,21 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
+ node_created = true;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
if (!elsiocb)
- return 1;
+ goto out_node_created;
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
if (rc) {
- lpfc_els_free_iocb(phba, elsiocb);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0937 %s: Failed to reg fc node, rc %d\n",
__func__, rc);
- return 1;
+ goto out_free_iocb;
}
}
pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
@@ -3494,23 +3545,27 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->ndlp = lpfc_nlp_get(ndlp);
- if (!elsiocb->ndlp) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (!elsiocb->ndlp)
+ goto out_free_iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue SCR: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto out_iocb_error;
return 0;
+
+out_iocb_error:
+ lpfc_nlp_put(ndlp);
+out_free_iocb:
+ lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+ if (node_created)
+ lpfc_nlp_put(ndlp);
+ return 1;
}
/**
@@ -3597,8 +3652,8 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue RSCN: did:x%x",
- ndlp->nlp_DID, 0, 0);
+ "Issue RSCN: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
@@ -3705,10 +3760,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
lpfc_nlp_put(ndlp);
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of the node.
- */
- /* Don't release reference count as RDF is likely outstanding */
+
return 0;
}
@@ -3726,7 +3778,12 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
*
* Return code
* 0 - Successfully issued rdf command
- * 1 - Failed to issue rdf command
+ * < 0 - Failed to issue rdf command
+ * -EACCES - RDF not required for NPIV_PORT
+ * -ENODEV - No fabric controller device available
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ *
**/
int
lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
@@ -3737,25 +3794,30 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
struct lpfc_nodelist *ndlp;
uint16_t cmdsize;
int rc;
+ bool node_created = false;
+ int err;
cmdsize = sizeof(*prdf);
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return -EACCES;
+
ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
if (!ndlp) {
ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
if (!ndlp)
return -ENODEV;
lpfc_enqueue_node(vport, ndlp);
+ node_created = true;
}
- /* RDF ELS is not required on an NPIV VN_Port. */
- if (vport->port_type == LPFC_NPIV_PORT)
- return -EACCES;
-
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RDF);
- if (!elsiocb)
- return -ENOMEM;
+ if (!elsiocb) {
+ err = -ENOMEM;
+ goto out_node_created;
+ }
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
@@ -3781,8 +3843,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->ndlp = lpfc_nlp_get(ndlp);
if (!elsiocb->ndlp) {
- lpfc_els_free_iocb(phba, elsiocb);
- return -EIO;
+ err = -EIO;
+ goto out_free_iocb;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -3791,11 +3853,19 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
- return -EIO;
+ err = -EIO;
+ goto out_iocb_error;
}
return 0;
+
+out_iocb_error:
+ lpfc_nlp_put(ndlp);
+out_free_iocb:
+ lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+ if (node_created)
+ lpfc_nlp_put(ndlp);
+ return err;
}
/**
@@ -3816,19 +3886,23 @@ static int
lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
+ int rc;
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL);
/* Send LS_ACC */
- if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
+ if (rc) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "1623 Failed to RDF_ACC from x%x for x%x\n",
- ndlp->nlp_DID, vport->fc_myDID);
+ "1623 Failed to RDF_ACC from x%x for x%x Data: %d\n",
+ ndlp->nlp_DID, vport->fc_myDID, rc);
return -EIO;
}
+ rc = lpfc_issue_els_rdf(vport, 0);
/* Issue new RDF for reregistering */
- if (lpfc_issue_els_rdf(vport, 0)) {
+ if (rc) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "2623 Failed to re register RDF for x%x\n",
- vport->fc_myDID);
+ "2623 Failed to re register RDF for x%x Data: %d\n",
+ vport->fc_myDID, rc);
return -EIO;
}
@@ -4299,7 +4373,7 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
+ * lpfc_els_free_iocb routine to trigger the release of
* the node.
*/
lpfc_els_free_iocb(phba, elsiocb);
@@ -5127,7 +5201,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- /* The I/O iocb is complete. Clear the node and first dmbuf */
+ /* The I/O iocb is complete. Clear the node and first dmabuf */
elsiocb->ndlp = NULL;
/* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
@@ -5160,14 +5234,12 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = elsiocb->cmd_dmabuf;
lpfc_els_free_data(phba, buf_ptr1);
- elsiocb->cmd_dmabuf = NULL;
}
}
if (elsiocb->bpl_dmabuf) {
buf_ptr = elsiocb->bpl_dmabuf;
lpfc_els_free_bpl(phba, buf_ptr);
- elsiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
@@ -5305,11 +5377,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
LPFC_MBOXQ_t *mbox = NULL;
u32 ulp_status, ulp_word4, tmo, did, iotag;
+ u32 cmd;
if (!vport) {
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"3177 null vport in ELS rsp\n");
- goto out;
+ goto release;
}
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -5419,7 +5492,7 @@ out:
* these conditions because it doesn't need the login.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport && vport->port_type == LPFC_NPIV_PORT &&
+ vport->port_type == LPFC_NPIV_PORT &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
@@ -5435,6 +5508,27 @@ out:
}
}
+ /* The driver's unsolicited deferred FLOGI ACC in Pt2Pt needs to
+ * release the initial reference because the put after the free_iocb
+ * call removes only the reference from the defer logic. This FLOGI
+ * is never registered with the SCSI transport.
+ */
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+ test_and_clear_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag)) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "3357 Pt2Pt Defer FLOGI ACC ndlp x%px, "
+ "nflags x%lx, fc_flag x%lx\n",
+ ndlp, ndlp->nlp_flag,
+ vport->fc_flag);
+ cmd = *((u32 *)cmdiocb->cmd_dmabuf->virt);
+ if (cmd == ELS_CMD_ACC) {
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
+ }
+ }
+
+release:
/* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -5569,7 +5663,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sp->cls1.classValid = 0;
sp->cls2.classValid = 0;
sp->cls3.classValid = 0;
- sp->cls4.classValid = 0;
/* Copy our worldwide names */
memcpy(&sp->portName, &vport->fc_sparam.portName,
@@ -5583,7 +5676,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0,
sizeof(sp->un.vendorVersion));
- sp->cmn.bbRcvSizeMsb &= 0xF;
+ if (!test_bit(FC_PT2PT, &vport->fc_flag))
+ sp->cmn.bbRcvSizeMsb &= 0xF;
/* If our firmware supports this feature, convey that
* info to the target using the vendor specific field.
@@ -8402,13 +8496,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
&wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
-
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3344 Deferring FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc.rx_id,
- phba->defer_flogi_acc.ox_id, phba->hba_flag);
-
phba->defer_flogi_acc.flag = true;
/* This nlp_get is paired with nlp_puts that reset the
@@ -8417,6 +8504,14 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* processed or cancelled.
*/
phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3344 Deferring FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, ndlp x%px, hba_flag x%lx\n",
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id,
+ phba->defer_flogi_acc.ndlp,
+ phba->hba_flag);
return 0;
}
@@ -8734,7 +8829,7 @@ reject_out:
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
*
- * This routine processes Read Timout Value (RTV) IOCB received as an
+ * This routine processes Read Timeout Value (RTV) IOCB received as an
* ELS unsolicited event. It first checks the remote port state. If the
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
@@ -10357,11 +10452,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Do not process any unsolicited ELS commands
* if the ndlp is in DEV_LOSS
*/
- if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) {
- if (newnode)
- lpfc_nlp_put(ndlp);
+ if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
goto dropit;
- }
elsiocb->ndlp = lpfc_nlp_get(ndlp);
if (!elsiocb->ndlp)
@@ -10843,7 +10935,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/*
* The different unsolicited event handlers would tell us
- * if they are done with "mp" by setting cmd_dmabuf to NULL.
+ * if they are done with "mp" by setting cmd_dmabuf/bpl_dmabuf to NULL.
*/
if (elsiocb->cmd_dmabuf) {
lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
@@ -11423,6 +11515,13 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
+ /* Fill out Auxiliary Parameter Data */
+ if (phba->pni) {
+ sp->aux.flags =
+ AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+ sp->aux.pni = cpu_to_be64(phba->pni);
+ }
+
pcmd += sizeof(uint32_t); /* CSP Word 2 */
pcmd += sizeof(uint32_t); /* CSP Word 3 */
pcmd += sizeof(uint32_t); /* CSP Word 4 */
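The NLP_DROPPED handling threaded through this file reduces to one idiom: the bit records that the node's initial reference has been dropped, and atomic test-and-modify makes the drop (or its reversal on retry) happen exactly once even when error paths race. Distilled from the hunks above:

	/* drop the initial reference only if nobody has yet */
	if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
		lpfc_nlp_put(ndlp);

	/* on reuse/retry: take the reference back if it had been dropped */
	if (test_and_clear_bit(NLP_DROPPED, &ndlp->nlp_flag)) {
		if (!lpfc_nlp_get(ndlp))
			return 1;   /* node already freed under us */
	}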
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 43d246c5c049..bb803f32bc1b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -424,6 +424,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
+ clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_get(ndlp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
"8438 Devloss timeout reversed on DID x%x "
@@ -566,7 +567,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
- lpfc_nlp_put(ndlp);
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
return fcf_inuse;
}
@@ -4371,6 +4373,8 @@ out:
lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+ if (phba->pni)
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPNI_PNI, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 3bc0efa7453e..b2e353590ebb 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -168,6 +168,11 @@ struct lpfc_sli_ct_request {
uint8_t len;
uint8_t symbname[255];
} rspn;
+ struct rspni { /* For RSPNI_PNI requests */
+ __be64 pni;
+ u8 len;
+ u8 symbname[255];
+ } rspni;
struct gff {
uint32_t PortId;
} gff;
@@ -213,6 +218,8 @@ struct lpfc_sli_ct_request {
sizeof(struct da_id))
#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rspn))
+#define RSPNI_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rspni))
/*
* FsType Definitions
@@ -309,6 +316,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RIP_NN 0x0235
#define SLI_CTNS_RIPA_NN 0x0236
#define SLI_CTNS_RSNN_NN 0x0239
+#define SLI_CTNS_RSPNI_PNI 0x0240
#define SLI_CTNS_DA_ID 0x0300
/*
@@ -512,6 +520,21 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};
+enum aux_parm_flags {
+ AUX_PARM_PNI_VALID = 0x20, /* FC Word 0, bit 29 */
+ AUX_PARM_DATA_VALID = 0x40, /* FC Word 0, bit 30 */
+};
+
+struct aux_parm {
+ u8 flags; /* FC Word 0, bit 31:24 */
+ u8 ext_feat[3]; /* FC Word 0, bit 23:0 */
+
+ __be64 pni; /* FC Word 1 and 2, platform name identifier */
+
+ __be16 rsvd; /* FC Word 3, bit 31:16 */
+ __be16 npiv_cnt; /* FC Word 3, bit 15:0 */
+} __packed;
+
struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn;
struct lpfc_name portName;
@@ -519,7 +542,7 @@ struct serv_parm { /* Structure is in Big Endian format */
struct class_parms cls1;
struct class_parms cls2;
struct class_parms cls3;
- struct class_parms cls4;
+ struct aux_parm aux;
union {
uint8_t vendorVersion[16];
struct {
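A consistency note on the serv_parm change: aux_parm packs into four FC words (1 + 3 + 8 + 2 + 2 = 16 bytes), the same size as the class_parms slot it replaces, so the offsets of the union that follows should be unchanged. A compile-time check along these lines would pin that down (not in the patch; sketch only):

	static_assert(sizeof(struct aux_parm) == sizeof(struct class_parms));
	static_assert(sizeof(struct aux_parm) == 16);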
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 065eb91de9c0..b1460b16dd91 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3057,12 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_vmid_vport_cleanup(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_DID == Fabric_Cntl_DID &&
- ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- lpfc_nlp_put(ndlp);
- continue;
- }
-
/* Fabric Ports not in UNMAPPED state are cleaned up in the
* DEVICE_RM event.
*/
@@ -7950,7 +7944,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate all driver workqueues here */
/* The lpfc_wq workqueue for deferred irq use */
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!phba->wq)
return -ENOMEM;
@@ -9082,9 +9076,9 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "6077 Setup FDMI mask: hba x%x port x%x\n",
- vport->fdmi_hba_mask, vport->fdmi_port_mask);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6077 Setup FDMI mask: hba x%x port x%x\n",
+ vport->fdmi_hba_mask, vport->fdmi_port_mask);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1e5ef93e67e3..8240d59f4120 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -432,8 +432,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
/* if already logged in, do implicit logout */
@@ -452,18 +450,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_type & NLP_FABRIC) &&
!(phba->nvmet_support)) {
- /* Clear ndlp info, since follow up PRLI may have
- * updated ndlp information
- */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
- clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
-
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
- ndlp, NULL);
- return 1;
+ break;
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
@@ -485,7 +472,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
break;
}
-
+ /* Clear ndlp info, since follow up processes may have
+ * updated ndlp information
+ */
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
@@ -1426,8 +1415,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7ea7c4245c69..73d77cfab5f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
+#include <linux/dmi.h>
+#include <linux/of.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -8447,6 +8449,70 @@ lpfc_set_host_tm(struct lpfc_hba *phba)
}
/**
+ * lpfc_get_platform_uuid - Attempts to extract a platform uuid
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first attempts to read the System UUID (offset 08h of
+ * the SMBIOS DMI System Information structure). If no valid UUID is
+ * found, no platform UUID is advertised.
+ **/
+static void
+lpfc_get_platform_uuid(struct lpfc_hba *phba)
+{
+ int rc;
+ const char *uuid;
+ char pni[17] = {0}; /* 16 characters + '\0' */
+ bool is_ff = true, is_00 = true;
+ u8 i;
+
+ /* First attempt SMBIOS DMI */
+ uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
+ if (uuid) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2088 SMBIOS UUID %s\n",
+ uuid);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2099 Could not extract UUID\n");
+ }
+
+ if (uuid && uuid_is_valid(uuid)) {
+ /* Generate PNI from UUID format.
+ *
+ * 1.) Extract lower 64 bits from UUID format.
+ * 2.) Set 3h for NAA Locally Assigned Name Identifier format.
+ *
+ * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy
+ *
+ * extract the yyyy-yyyyyyyyyyyy portion
+ * final PNI 3yyyyyyyyyyyyyyy
+ */
+ scnprintf(pni, sizeof(pni), "3%c%c%c%s",
+ uuid[20], uuid[21], uuid[22], &uuid[24]);
+
+ /* Sanitize the converted PNI */
+ for (i = 1; i < 16 && (is_ff || is_00); i++) {
+ if (pni[i] != '0')
+ is_00 = false;
+ if (pni[i] != 'f' && pni[i] != 'F')
+ is_ff = false;
+ }
+
+ /* Convert from char* to unsigned long */
+ rc = kstrtoul(pni, 16, &phba->pni);
+ if (!rc && !is_ff && !is_00) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2100 PNI 0x%016lx\n", phba->pni);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2101 PNI %s generation status %d\n",
+ pni, rc);
+ phba->pni = 0;
+ }
+ }
+}
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -8529,6 +8595,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
}
+ /* Obtain platform UUID, only for SLI4 FC adapters */
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
+ lpfc_get_platform_uuid(phba);
+
if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
LPFC_DCBX_CEE_MODE)
set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
@@ -19858,13 +19928,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
+ * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI
* @ndlp: pointer to lpfc nodelist data structure.
* @cmpl: completion call-back.
* @iocbq: data to load as mbox ctx_u information
*
- * This routine is invoked to remove the memory region that
- * provided rpi via a bitmask.
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
@@ -19894,7 +19966,6 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
return -EIO;
}
- /* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 31c3c5abdca6..f3dada5bf7c1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.11"
+#define LPFC_DRIVER_VERSION "14.4.0.12"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index b677d80e5874..ddeea0ee2834 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1150,9 +1150,13 @@ typedef struct LOG_BLOCK_SPAN_INFO {
} LD_SPAN_INFO, *PLD_SPAN_INFO;
struct MR_FW_RAID_MAP_ALL {
- struct MR_FW_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+ /* Must be last --ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct MR_FW_RAID_MAP, raidMap, ldSpanMap,
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+ );
} __attribute__ ((packed));
+static_assert(offsetof(struct MR_FW_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+ offsetof(struct MR_FW_RAID_MAP_ALL, ldSpanMap));
struct MR_DRV_RAID_MAP {
/* total size of this structure, including this field.
@@ -1194,10 +1198,13 @@ struct MR_DRV_RAID_MAP {
* And it is mainly for code re-use purpose.
*/
struct MR_DRV_RAID_MAP_ALL {
-
- struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+ /* Must be last --ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct MR_DRV_RAID_MAP, raidMap, ldSpanMap,
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+ );
} __packed;
+static_assert(offsetof(struct MR_DRV_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+ offsetof(struct MR_DRV_RAID_MAP_ALL, ldSpanMap));
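TRAILING_OVERLAP(), used in both conversions above, wraps the flex-array-terminated struct and the fixed trailing array in an anonymous union so the flexible member and the array share storage; the static_asserts then prove the overlay landed where the old layout had it. Hand-expanded, it is roughly the following (illustrative; the real macro may differ in detail):

	union {
		struct MR_FW_RAID_MAP raidMap;  /* ends in ldSpanMap[] (FAM) */
		struct {
			u8 __pad[offsetof(struct MR_FW_RAID_MAP, ldSpanMap)];
			struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
		};
	};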
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 8ff4b89ff81e..9acca83d6958 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1534,7 +1534,7 @@ static int __init pm8001_init(void)
if (pm8001_use_tasklet && !pm8001_use_msix)
pm8001_use_tasklet = false;
- pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
+ pm8001_wq = alloc_workqueue("pm80xx", WQ_PERCPU, 0);
if (!pm8001_wq)
goto err;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 6b1ebab36fa3..7792e00800ae 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3374,7 +3374,8 @@ retry_probe:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
qedf->io_mempool);
- qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
+ qedf->link_update_wq = alloc_workqueue("qedf_%u_link",
+ WQ_MEM_RECLAIM | WQ_PERCPU,
1, qedf->lport->host->host_no);
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
@@ -3585,7 +3586,8 @@ retry_probe:
ether_addr_copy(params.ll2_mac_address, qedf->mac);
/* Start LL2 processing thread */
- qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
+ qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
host->host_no);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate LL2 workqueue.\n");
@@ -3628,7 +3630,8 @@ retry_probe:
}
qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
- WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ qedf->lport->host->host_no);
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
@@ -3641,7 +3644,8 @@ retry_probe:
sprintf(host_buf, "qedf_%u_dpc",
qedf->lport->host->host_no);
qedf->dpc_wq =
- alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
+ alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ host_buf);
}
INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
@@ -4177,7 +4181,8 @@ static int __init qedf_init(void)
goto err3;
}
- qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
+ qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ "qedf_io_wq");
if (!qedf_io_wq) {
QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
goto err4;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index b168bb2178e9..56685ee22fdf 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2768,7 +2768,7 @@ retry_probe:
}
qedi->offload_thread = alloc_workqueue("qedi_ofld%d",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
1, qedi->shost->host_no);
if (!qedi->offload_thread) {
QEDI_ERR(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index ef841f643171..26c312a48a19 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2799,7 +2799,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
- dprintk(2, " bus %i, target %i, lun %i\n",
+ dprintk(2, " bus %i, target %i, lun %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
@@ -2871,7 +2871,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
- "command packet data - b %i, t %i, l %i \n",
+ "command packet data - b %i, t %i, l %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
@@ -2929,14 +2929,14 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
remseg -= cnt;
dprintk(5, "qla1280_64bit_start_scsi: "
"continuation packet data - b %i, t "
- "%i, l %i \n", SCSI_BUS_32(cmd),
+ "%i, l %llu\n", SCSI_BUS_32(cmd),
SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE);
}
} else { /* No data transfer */
dprintk(5, "qla1280_64bit_start_scsi: No data, command "
- "packet data - b %i, t %i, l %i \n",
+ "packet data - b %i, t %i, l %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
}
@@ -3655,7 +3655,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, "
- "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+ "l %llu\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
if (sense_sz)
qla1280_dump_buffer(2,
@@ -3955,7 +3955,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
sp = scsi_cmd_priv(cmd);
printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
- printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
+ printk(" chan=%d, target = 0x%02x, lun = 0x%02llx, cmd_len = 0x%02x\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
CMD_CDBLEN(cmd));
printk(" CDB = ");
@@ -3976,29 +3976,6 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction);
}
-
-/**************************************************************************
- * ql1280_dump_device
- *
- **************************************************************************/
-static void
-ql1280_dump_device(struct scsi_qla_host *ha)
-{
-
- struct scsi_cmnd *cp;
- struct srb *sp;
- int i;
-
- printk(KERN_DEBUG "Outstanding Commands on controller:\n");
-
- for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
- if ((sp = ha->outstanding_cmds[i]) == NULL)
- continue;
- if ((cp = sp->cmd) == NULL)
- continue;
- qla1280_print_scsi_cmd(1, cp);
- }
-}
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 5136549005e7..a7e3ec9bba47 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -54,10 +54,11 @@
* | Misc | 0xd303 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
- * | Target Mode | 0xe081 | |
+ * | Target Mode | 0xe089 | |
* | Target Mode Management | 0xf09b | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000d | |
+ * | Target Mode SRR | 0x11038 | |
* ----------------------------------------------------------------------
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cb95b7b12051..b3265952c4be 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3503,7 +3503,6 @@ struct isp_operations {
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
-#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 145defc420f2..55d531c19e6b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -766,7 +766,7 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Global function prototypes for multi-q */
extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
- struct qla_msix_entry *, int);
+ struct qla_msix_entry *);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 6a2e1c7fd125..d395cbfe6802 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4369,6 +4369,7 @@ enable_82xx_npiv:
ha->max_npiv_vports =
MIN_MULTI_ID_FABRIC - 1;
}
+ qlt_config_nvram_with_fw_version(vha);
qla2x00_get_resource_cnts(vha);
qla_init_iocb_limit(vha);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index c4c6b5c6658c..a3971afc2dd1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -4467,32 +4467,6 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
return IRQ_HANDLED;
}
-irqreturn_t
-qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
-{
- struct qla_hw_data *ha;
- struct qla_qpair *qpair;
- struct device_reg_24xx __iomem *reg;
- unsigned long flags;
-
- qpair = dev_id;
- if (!qpair) {
- ql_log(ql_log_info, NULL, 0x505b,
- "%s: NULL response queue pointer.\n", __func__);
- return IRQ_NONE;
- }
- ha = qpair->hw;
-
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- queue_work(ha->wq, &qpair->q_work);
-
- return IRQ_HANDLED;
-}
-
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
@@ -4505,7 +4479,6 @@ static const struct qla_init_msix_entry msix_entries[] = {
{ "rsp_q", qla24xx_msix_rsp_q },
{ "atio_q", qla83xx_msix_atio_q },
{ "qpair_multiq", qla2xxx_msix_rsp_q },
- { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -4792,9 +4765,10 @@ free_irqs:
}
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
- struct qla_msix_entry *msix, int vector_type)
+ struct qla_msix_entry *msix)
{
- const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
+ const struct qla_init_msix_entry *intr =
+ &msix_entries[QLA_MSIX_QPAIR_MULTIQ_RSP_Q];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 32eb0ce8b170..1f01576f044b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -253,6 +253,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ reinit_completion(&ha->mbx_intr_comp);
/* Unlock mbx registers and wait for interrupt */
ql_dbg(ql_dbg_mbx, vha, 0x100f,
@@ -279,6 +280,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"cmd=%x Timeout.\n", command);
spin_lock_irqsave(&ha->hardware_lock, flags);
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ reinit_completion(&ha->mbx_intr_comp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (chip_reset != ha->chip_reset) {
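The added reinit_completion() calls read like a fix for a stale-completion race: when a mailbox command times out, a late interrupt can still complete() ha->mbx_intr_comp, and without re-arming, the next wait_for_completion_timeout() would return immediately against stale state. A minimal sketch of the re-arm pattern, with assumed names (not the qla2xxx code):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(cmd_done);

    static int issue_cmd_and_wait(void)
    {
            /*
             * Re-arm before issuing the command so a complete() left over
             * from a previously timed-out command cannot satisfy this wait.
             */
            reinit_completion(&cmd_done);
            /* ... program mailbox registers and ring the doorbell ... */
            if (!wait_for_completion_timeout(&cmd_done, HZ))
                    return -ETIMEDOUT;
            return 0;
    }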
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8b71ac0b1d99..0abc47e72e0b 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -899,9 +899,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
- ret = qla25xx_request_irq(ha, qpair, qpair->msix,
- ha->flags.disable_msix_handshake ?
- QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
+ ret = qla25xx_request_irq(ha, qpair, qpair->msix);
if (ret)
goto que_failed;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 316594aa40cc..42eb65a62f1f 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -1292,7 +1292,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
- kfree(item);
+ qla24xx_free_purex_item(item);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9007533e36e0..3a57f07d73f5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1862,12 +1862,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- if (qla2x00_chip_is_down(vha)) {
- req->outstanding_cmds[cnt] = NULL;
- sp->done(sp, res);
- continue;
- }
-
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1881,10 +1875,26 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
- cmd->aborted = 1;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->aborted = 1;
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ ha->tgt.tgt_ops->handle_data(cmd);
+ } else {
+ ha->tgt.tgt_ops->free_cmd(cmd);
+ }
break;
case TYPE_TGT_TMCMD:
- /* Skip task management functions. */
+ /*
+ * Currently, only the ABTS response ends up on the
+ * outstanding_cmds[] array.
+ */
+ qlt_free_ul_mcmd(ha,
+ (struct qla_tgt_mgmt_cmd *) sp);
break;
default:
break;
@@ -3397,7 +3407,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!ha->wq)) {
ret = -ENOMEM;
goto probe_failed;
@@ -3444,13 +3454,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mqenable = 0;
if (ha->mqenable) {
- bool startit = false;
-
- if (QLA_TGT_MODE_ENABLED())
- startit = false;
-
- if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
- startit = true;
+ bool startit = !!(host->active_mode & MODE_INITIATOR);
/* Create start of day qpairs for Block MQ */
for (i = 0; i < ha->max_qpairs; i++)
@@ -5280,7 +5284,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
- queue_work(system_unbound_wq, &fcport->reg_work);
+ queue_work(system_dfl_wq, &fcport->reg_work);
}
static
@@ -7244,6 +7248,7 @@ qla2xxx_wake_dpc(struct scsi_qla_host *vha)
if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
wake_up_process(t);
}
+EXPORT_SYMBOL(qla2xxx_wake_dpc);
/*
* qla2x00_rst_aen
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1e81582085e3..d772136984c9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -104,8 +104,6 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags);
-static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
- *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
@@ -136,20 +134,6 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
-static const char *prot_op_str(u32 prot_op)
-{
- switch (prot_op) {
- case TARGET_PROT_NORMAL: return "NORMAL";
- case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
- case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
- case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
- case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
- case TARGET_PROT_DIN_PASS: return "DIN_PASS";
- case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
- default: return "UNKNOWN";
- }
-}
-
/* This API intentionally takes dest as a parameter, rather than returning an
 * int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -226,6 +210,10 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
struct qla_tgt_sess_op *u;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
+ unsigned int add_cdb_len = 0;
+
+ /* atio must be the last member of qla_tgt_sess_op for add_cdb_len */
+ BUILD_BUG_ON(offsetof(struct qla_tgt_sess_op, atio) + sizeof(u->atio) != sizeof(*u));
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_async, vha, 0x502c,
@@ -234,12 +222,17 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
goto out_term;
}
- u = kzalloc(sizeof(*u), GFP_ATOMIC);
+ if (atio->u.raw.entry_type == ATIO_TYPE7 &&
+ atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)
+ add_cdb_len =
+ ((unsigned int) atio->u.isp24.fcp_cmnd.add_cdb_len) * 4;
+
+ u = kzalloc(sizeof(*u) + add_cdb_len, GFP_ATOMIC);
if (u == NULL)
goto out_term;
u->vha = vha;
- memcpy(&u->atio, atio, sizeof(*atio));
+ memcpy(&u->atio, atio, sizeof(*atio) + add_cdb_len);
INIT_LIST_HEAD(&u->cmd_list);
spin_lock_irqsave(&vha->cmd_list_lock, flags);
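The sizing above relies on the FCP rule that fcp_cmnd.add_cdb_len counts 4-byte words of CDB bytes beyond the 16 fixed ones, so the queued copy needs sizeof(*u) plus add_cdb_len * 4 extra bytes. A small sketch of the arithmetic (illustrative helpers, not driver code):

    #include <linux/types.h>

    /* Extra CDB bytes carried after the 16 fixed ones. */
    static inline size_t fcp_add_cdb_bytes(unsigned int add_cdb_len)
    {
            return (size_t)add_cdb_len * 4;
    }

    /* Allocation size for a queued copy of a variable-length ATIO. */
    static inline size_t queued_atio_size(size_t base, unsigned int add_cdb_len)
    {
            return base + fcp_add_cdb_bytes(add_cdb_len);
    }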
@@ -252,7 +245,7 @@ out:
return;
out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked);
goto out;
}
@@ -271,7 +264,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
"Freeing unknown %s %p, because of Abort\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
- &u->atio, ha_locked, 0);
+ &u->atio, ha_locked);
goto abort;
}
@@ -285,7 +278,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
- &u->atio, ha_locked, 0);
+ &u->atio, ha_locked);
} else {
ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
@@ -1909,6 +1902,10 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
* ABTS response. So, in it ID fields are reversed.
*/
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe082,
+ "qla_target(%d): tag %u: Sending TERM EXCH CTIO for ABTS\n",
+ vha->vp_idx, le32_to_cpu(entry->exchange_addr_to_abort));
+
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->nport_handle = entry->nport_handle;
@@ -1987,8 +1984,12 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
cmd_lun = scsilun_to_int(
(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
- if (cmd_key == key && cmd_lun == lun)
+ if (cmd_key == key && cmd_lun == lun) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe085,
+ "qla_target(%d): tag %lld: aborted by TMR\n",
+ vha->vp_idx, cmd->se_cmd.tag);
cmd->aborted = 1;
+ }
}
spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
@@ -2017,7 +2018,6 @@ static void qlt_do_tmr_work(struct work_struct *work)
struct qla_hw_data *ha = mcmd->vha->hw;
int rc;
uint32_t tag;
- unsigned long flags;
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
@@ -2032,34 +2032,12 @@ static void qlt_do_tmr_work(struct work_struct *work)
mcmd->tmr_func, tag);
if (rc != 0) {
- spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
- switch (mcmd->tmr_func) {
- case QLA_TGT_ABTS:
- mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
- qlt_build_abts_resp_iocb(mcmd);
- break;
- case QLA_TGT_LUN_RESET:
- case QLA_TGT_CLEAR_TS:
- case QLA_TGT_ABORT_TS:
- case QLA_TGT_CLEAR_ACA:
- case QLA_TGT_TARGET_RESET:
- qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
- qla_sam_status);
- break;
-
- case QLA_TGT_ABORT_ALL:
- case QLA_TGT_NEXUS_LOSS_SESS:
- case QLA_TGT_NEXUS_LOSS:
- qlt_send_notify_ack(mcmd->qpair,
- &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
- break;
- }
- spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
-
ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
"qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
mcmd->vha->vp_idx, rc);
- mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ mcmd->flags |= QLA24XX_MGMT_LLD_OWNED;
+ mcmd->fc_tm_rsp = FCP_TMF_FAILED;
+ qlt_xmit_tm_rsp(mcmd);
}
}
@@ -2247,6 +2225,21 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
EXPORT_SYMBOL(qlt_free_mcmd);
/*
+ * If the upper layer knows about this mgmt cmd, then call its ->free_mcmd()
+ * callback, which will eventually call qlt_free_mcmd(). Otherwise, call
+ * qlt_free_mcmd() directly.
+ */
+void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd)
+{
+ if (!mcmd)
+ return;
+ if (mcmd->flags & QLA24XX_MGMT_LLD_OWNED)
+ qlt_free_mcmd(mcmd);
+ else
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+}
+
+/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then
* reacquire
*/
@@ -2338,12 +2331,12 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
mcmd->reset_count, qpair->chip_reset);
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return;
}
- if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
+ if (mcmd->flags & QLA24XX_MGMT_SEND_NACK) {
switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
case ELS_LOGO:
case ELS_PRLO:
@@ -2376,7 +2369,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* qlt_xmit_tm_rsp() returns here..
*/
if (free_mcmd)
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
@@ -2443,7 +2436,7 @@ out_err:
return -1;
}
-static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
@@ -3218,12 +3211,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
uint32_t full_req_cnt = 0;
unsigned long flags = 0;
int res;
-
- if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
- (cmd->sess && cmd->sess->deleted)) {
- cmd->state = QLA_TGT_STATE_PROCESSED;
- return 0;
- }
+ int pre_xmit_res;
ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
"is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
@@ -3231,33 +3219,43 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
&cmd->se_cmd, qpair->id);
- res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ pre_xmit_res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
- if (unlikely(res != 0)) {
- return res;
- }
+ /*
+ * Check pre_xmit_res later because we want to check other errors
+ * first.
+ */
+
+ /* Begin timer on the first call, not on SRR retry. */
+ if (likely(cmd->jiffies_at_hw_st_entry == 0))
+ cmd->jiffies_at_hw_st_entry = get_jiffies_64();
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (unlikely(cmd->sent_term_exchg ||
+ cmd->sess->deleted ||
+ !qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe101,
+ "qla_target(%d): tag %lld: skipping send response for aborted cmd\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_unmap_sg(vha, cmd);
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return 0;
+ }
+
+ /* Check for errors from qlt_pre_xmit_response(). */
+ res = pre_xmit_res;
+ if (unlikely(res))
+ goto out_unmap_unlock;
+
if (xmit_type == QLA_TGT_XMIT_STATUS)
qpair->tgt_counters.core_qla_snd_status++;
else
qpair->tgt_counters.core_qla_que_buf++;
- if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
- /*
- * Either the port is not online or this request was from
- * previous life, just abort the processing.
- */
- cmd->state = QLA_TGT_STATE_PROCESSED;
- ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
- "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
- vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, qpair->chip_reset);
- res = 0;
- goto out_unmap_unlock;
- }
-
/* Does F/W have IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, full_req_cnt);
if (unlikely(res))
@@ -3372,36 +3370,50 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
struct qla_tgt_prm prm;
unsigned long flags = 0;
int res = 0;
+ int pci_map_res;
struct qla_qpair *qpair = cmd->qpair;
+ /* Begin timer on the first call, not on SRR retry. */
+ if (likely(cmd->jiffies_at_hw_st_entry == 0))
+ cmd->jiffies_at_hw_st_entry = get_jiffies_64();
+
memset(&prm, 0, sizeof(prm));
prm.cmd = cmd;
prm.tgt = tgt;
prm.sg = NULL;
prm.req_cnt = 1;
- if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
- (cmd->sess && cmd->sess->deleted)) {
- /*
- * Either the port is not online or this request was from
- * previous life, just abort the processing.
- */
+ /* Calculate number of entries and segments required */
+ pci_map_res = qlt_pci_map_calc_cnt(&prm);
+ /*
+ * Check pci_map_res later because we want to check other errors first.
+ */
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+
+ if (unlikely(cmd->sent_term_exchg ||
+ cmd->sess->deleted ||
+ !qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe102,
+ "qla_target(%d): tag %lld: skipping data-out for aborted cmd\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_unmap_sg(vha, cmd);
cmd->aborted = 1;
cmd->write_data_transferred = 0;
cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
vha->hw->tgt.tgt_ops->handle_data(cmd);
- ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
- "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
- vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, qpair->chip_reset);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
}
- /* Calculate number of entries and segments required */
- if (qlt_pci_map_calc_cnt(&prm) != 0)
- return -EAGAIN;
+ /* Check for errors from qlt_pci_map_calc_cnt(). */
+ if (unlikely(pci_map_res != 0)) {
+ res = -EAGAIN;
+ goto out_unlock_free_unmap;
+ }
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
if (res != 0)
@@ -3438,6 +3450,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
return res;
out_unlock_free_unmap:
+ cmd->jiffies_at_hw_st_entry = 0;
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -3457,7 +3470,6 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
uint8_t *ep = &sts->expected_dif[0];
uint64_t lba = cmd->se_cmd.t_task_lba;
uint8_t scsi_status, sense_key, asc, ascq;
- unsigned long flags;
struct scsi_qla_host *vha = cmd->vha;
cmd->trc_flags |= TRC_DIF_ERR;
@@ -3528,16 +3540,14 @@ out:
case QLA_TGT_STATE_NEED_DATA:
/* handle_data will load DIF error code */
cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
vha->hw->tgt.tgt_ops->handle_data(cmd);
break;
default:
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if (cmd->aborted) {
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ if (cmd->sent_term_exchg) {
vha->hw->tgt.tgt_ops->free_cmd(cmd);
break;
}
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
ascq);
@@ -3611,6 +3621,62 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
}
/*
+ * Handle an SRR that was previously associated with a command when the
+ * command has been aborted or otherwise cannot process the SRR.
+ *
+ * If reject is true, then attempt to reject the SRR. Otherwise abort the
+ * immediate notify exchange.
+ */
+void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_tgt_srr *srr = cmd->srr;
+
+ if (srr->imm_ntfy_recvd) {
+ if (reject)
+ srr->reject = true;
+ else
+ srr->aborted = true;
+
+ if (srr->ctio_recvd) {
+ /*
+ * The SRR should already be scheduled for processing,
+ * and the SRR processing code should see that the cmd
+ * has been aborted and take appropriate action. In
+ * addition, the cmd refcount should have been
+ * incremented, preventing the cmd from being freed
+ * until SRR processing is done.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102e,
+ "qla_target(%d): tag %lld: %s: SRR already scheduled\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ } else {
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+
+ /* Schedule processing for the SRR immediate notify. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102f,
+ "qla_target(%d): tag %lld: %s: schedule SRR %s\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__,
+ reject ? "reject" : "abort");
+ cmd->srr = NULL;
+ srr->cmd = NULL;
+ spin_lock_irqsave(&tgt->srr_lock, flags);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work(qla_tgt_wq, &tgt->srr_work);
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11030,
+ "qla_target(%d): tag %lld: %s: no IMM SRR; free SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ cmd->srr = NULL;
+ kfree(srr);
+ }
+}
+EXPORT_SYMBOL(qlt_srr_abort);
+
+/*
* If hardware_lock held on entry, might drop it, then reacquire
* This function sends the appropriate CTIO to ISP 2xxx or 24xx
*/
@@ -3618,43 +3684,61 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
{
- struct scsi_qla_host *vha = qpair->vha;
struct ctio7_to_24xx *ctio24;
- struct qla_hw_data *ha = vha->hw;
- request_t *pkt;
- int ret = 0;
+ struct scsi_qla_host *vha;
+ uint16_t loop_id;
uint16_t temp;
- ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
-
- if (cmd)
+ if (cmd) {
vha = cmd->vha;
+ loop_id = cmd->loop_id;
+ } else {
+ port_id_t id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
+ struct qla_hw_data *ha;
+ struct fc_port *sess;
+ unsigned long flags;
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
- if (pkt == NULL) {
+ vha = qpair->vha;
+ ha = vha->hw;
+
+ /*
+ * CTIO7_NHANDLE_UNRECOGNIZED works when aborting an idle
+ * command but not when aborting a command with an active CTIO
+ * exchange.
+ */
+ loop_id = CTIO7_NHANDLE_UNRECOGNIZED;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
+ if (sess)
+ loop_id = sess->loop_id;
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+
+ if (cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
+ "qla_target(%d): tag %lld: Sending TERM EXCH CTIO state %d cmd_sent_to_fw %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state,
+ cmd->cmd_sent_to_fw);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
+ "qla_target(%d): tag %u: Sending TERM EXCH CTIO (no cmd)\n",
+ vha->vp_idx, le32_to_cpu(atio->u.isp24.exchange_addr));
+ }
+
+ ctio24 = qla2x00_alloc_iocbs_ready(qpair, NULL);
+ if (!ctio24) {
ql_dbg(ql_dbg_tgt, vha, 0xe050,
"qla_target(%d): %s failed: unable to allocate "
"request packet\n", vha->vp_idx, __func__);
return -ENOMEM;
}
- if (cmd != NULL) {
- if (cmd->state < QLA_TGT_STATE_PROCESSED) {
- ql_dbg(ql_dbg_tgt, vha, 0xe051,
- "qla_target(%d): Terminating cmd %p with "
- "incorrect state %d\n", vha->vp_idx, cmd,
- cmd->state);
- } else
- ret = 1;
- }
-
qpair->tgt_counters.num_term_xchg_sent++;
- pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
+ ctio24->entry_count = 1;
+ ctio24->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio24->nport_handle = cpu_to_le16(loop_id);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -3671,12 +3755,25 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
- return ret;
+ return 0;
}
-static void qlt_send_term_exchange(struct qla_qpair *qpair,
- struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
- int ul_abort)
+/*
+ * Aborting a command that is active in the FW (i.e. cmd->cmd_sent_to_fw == 1)
+ * will usually trigger the FW to send a completion CTIO with error status,
+ * and the driver will then call the ->handle_data() or ->free_cmd() callbacks.
+ * This can be used to clear a command that is locked up in the FW unless there
+ * is something more seriously wrong.
+ *
+ * Aborting a command that is not active in the FW (i.e.
+ * cmd->cmd_sent_to_fw == 0) will not directly trigger any callbacks. Instead,
+ * when the target mode midlevel calls qlt_rdy_to_xfer() or
+ * qlt_xmit_response(), the driver will see that the cmd has been aborted and
+ * call the appropriate callback immediately without performing the requested
+ * operation.
+ */
+void qlt_send_term_exchange(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
struct scsi_qla_host *vha;
unsigned long flags = 0;
@@ -3700,10 +3797,14 @@ static void qlt_send_term_exchange(struct qla_qpair *qpair,
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
done:
- if (cmd && !ul_abort && !cmd->aborted) {
- if (cmd->sg_mapped)
- qlt_unmap_sg(vha, cmd);
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ if (cmd) {
+ /*
+ * Set this even if -ENOMEM above, since term exchange will be
+ * sent eventually...
+ */
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
}
if (!ha_locked)
@@ -3711,6 +3812,7 @@ done:
return;
}
+EXPORT_SYMBOL(qlt_send_term_exchange);
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
@@ -3761,38 +3863,35 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
- struct qla_tgt *tgt = cmd->tgt;
- struct scsi_qla_host *vha = tgt->vha;
- struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_qpair *qpair = cmd->qpair;
unsigned long flags;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
- "qla_target(%d): terminating exchange for aborted cmd=%p "
- "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
- se_cmd->tag);
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if (cmd->aborted) {
- if (cmd->sg_mapped)
- qlt_unmap_sg(vha, cmd);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /*
- * It's normal to see 2 calls in this path:
- * 1) XFER Rdy completion + CMD_T_ABORT
- * 2) TCM TMR - drain_state_list
- */
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
- "multiple abort. %p transport_state %x, t_state %x, "
- "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
- cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
- return -EIO;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): tag %lld: cmd being aborted (state %d) %s; %s\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state,
+ cmd->cmd_sent_to_fw ? "sent to fw" : "not sent to fw",
+ cmd->aborted ? "aborted" : "not aborted");
+
+ if (cmd->state != QLA_TGT_STATE_DONE && !cmd->sent_term_exchg) {
+ if (!qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset) {
+ /*
+ * Chip was reset; just pretend that we sent the term
+ * exchange.
+ */
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
+ } else {
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
}
- cmd->aborted = 1;
- cmd->trc_flags |= TRC_ABORT;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
@@ -3812,54 +3911,99 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
qlt_decr_num_pend_cmds(cmd->vha);
BUG_ON(cmd->sg_mapped);
+ if (unlikely(cmd->free_sg)) {
+ cmd->free_sg = 0;
+ qlt_free_sg(cmd);
+ }
+ if (unlikely(cmd->srr))
+ qlt_srr_abort(cmd, false);
+
+ if (unlikely(cmd->aborted ||
+ (cmd->trc_flags & (TRC_CTIO_STRANGE | TRC_CTIO_ERR |
+ TRC_SRR_CTIO | TRC_SRR_IMM)))) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xe086,
+ "qla_target(%d): tag %lld: free cmd (trc_flags %x, aborted %u, sent_term_exchg %u, rsp_sent %u)\n",
+ cmd->vha->vp_idx, cmd->se_cmd.tag,
+ cmd->trc_flags, cmd->aborted, cmd->sent_term_exchg,
+ cmd->rsp_sent);
+ }
+
+ if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
+ kfree(cmd->cdb);
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+ }
+
cmd->jiffies_at_free = get_jiffies_64();
if (!sess || !sess->se_sess) {
WARN_ON(1);
return;
}
- cmd->jiffies_at_free = get_jiffies_64();
cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
/*
- * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ * Process a CTIO response for a SCSI command that failed due to an SRR.
+ *
+ * qpair->qp_lock_ptr supposed to be held on entry
*/
-static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
- struct qla_tgt_cmd *cmd, uint32_t status)
+static int qlt_prepare_srr_ctio(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd)
{
- int term = 0;
- struct scsi_qla_host *vha = qpair->vha;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr *srr;
- if (cmd->se_cmd.prot_op)
- ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
- "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
- "se_cmd=%p tag[%x] op %#x/%s",
- cmd->lba, cmd->lba,
- cmd->num_blks, &cmd->se_cmd,
- cmd->atio.u.isp24.exchange_addr,
- cmd->se_cmd.prot_op,
- prot_op_str(cmd->se_cmd.prot_op));
-
- if (ctio != NULL) {
- struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
-
- term = !(c->flags &
- cpu_to_le16(OF_TERM_EXCH));
- } else
- term = 1;
+ cmd->trc_flags |= TRC_SRR_CTIO;
- if (term)
- qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
+ srr = cmd->srr;
+ if (srr != NULL) {
+ /* qlt_prepare_srr_imm() was called first. */
- return term;
-}
+ WARN_ON(srr->ctio_recvd);
+ WARN_ON(!srr->imm_ntfy_recvd);
+ if (vha->hw->tgt.tgt_ops->get_cmd_ref(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11037,
+ "qla_target(%d): tag %lld: unable to get cmd ref for SRR processing\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_srr_abort(cmd, true);
+ return -ESHUTDOWN;
+ }
+
+ srr->ctio_recvd = true;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100f,
+ "qla_target(%d): tag %lld: Scheduling SRR work\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ /* Schedule the srr for processing in qlt_handle_srr(). */
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &tgt->srr_work);
+ spin_unlock(&tgt->srr_lock);
+ return 0;
+ }
+
+ srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
+ if (!srr)
+ return -ENOMEM;
+
+ /* Expect qlt_prepare_srr_imm() to be called. */
+ srr->ctio_recvd = true;
+ srr->cmd = cmd;
+ srr->reset_count = cmd->reset_count;
+ cmd->srr = srr;
+ return 0;
+}
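qlt_prepare_srr_ctio() is one half of a rendezvous: an SRR is delivered as two independent events, an immediate notify and a CTIO with SRR status, which can arrive in either order. Whichever handler runs first allocates the srr struct and parks it on the command; whichever runs second schedules the actual processing. A minimal sketch of the two-event rendezvous (illustrative; assumes the caller holds the lock protecting the struct):

    #include <linux/types.h>

    struct rendezvous {
            bool ctio_seen;
            bool imm_ntfy_seen;
    };

    /* Returns true once both halves have arrived and work may be queued. */
    static bool rendezvous_arrive(struct rendezvous *r, bool is_ctio)
    {
            if (is_ctio)
                    r->ctio_seen = true;
            else
                    r->imm_ntfy_seen = true;
            return r->ctio_seen && r->imm_ntfy_seen;
    }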
/* ha->hardware_lock supposed to be held on entry */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
- struct rsp_que *rsp, uint32_t handle, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, uint8_t cmd_type,
+ const void *ctio)
{
void *cmd = NULL;
struct req_que *req;
@@ -3882,29 +4026,97 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
h &= QLA_CMD_HANDLE_MASK;
- if (h != QLA_TGT_NULL_HANDLE) {
- if (unlikely(h >= req->num_outstanding_cmds)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe052,
- "qla_target(%d): Wrong handle %x received\n",
- vha->vp_idx, handle);
- return NULL;
- }
-
- cmd = req->outstanding_cmds[h];
- if (unlikely(cmd == NULL)) {
- ql_dbg(ql_dbg_async, vha, 0xe053,
- "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
- vha->vp_idx, handle, req->id, rsp->id);
- return NULL;
- }
- req->outstanding_cmds[h] = NULL;
- } else if (ctio != NULL) {
+ if (h == QLA_TGT_NULL_HANDLE) {
/* We can't get loop ID from CTIO7 */
ql_dbg(ql_dbg_tgt, vha, 0xe054,
"qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
"support NULL handles\n", vha->vp_idx);
return NULL;
}
+ if (unlikely(h >= req->num_outstanding_cmds)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe052,
+ "qla_target(%d): Wrong handle %x received\n",
+ vha->vp_idx, handle);
+ return NULL;
+ }
+
+ /*
+ * We passed a numeric handle for a cmd to the hardware, and the
+ * hardware passed the handle back to us. Look up the associated cmd,
+ * and validate that the cmd_type and exchange address match what the
+ * caller expects. This guards against buggy HBA firmware that returns
+ * the same CTIO multiple times.
+ */
+
+ cmd = req->outstanding_cmds[h];
+
+ if (unlikely(cmd == NULL)) {
+ if (cmd_type == TYPE_TGT_CMD) {
+ __le32 ctio_exchange_addr =
+ ((const struct ctio7_from_24xx *)ctio)->
+ exchange_address;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
+ "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
+ vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h,
+ handle, req->id, rsp->id);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
+ "qla_target(%d): cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
+ vha->vp_idx, handle, req->id, rsp->id);
+ }
+ return NULL;
+ }
+
+ if (unlikely(((srb_t *)cmd)->cmd_type != cmd_type)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe087,
+ "qla_target(%d): handle %x: cmd detached; ignoring CTIO (cmd_type mismatch)\n",
+ vha->vp_idx, h);
+ return NULL;
+ }
+
+ switch (cmd_type) {
+ case TYPE_TGT_CMD: {
+ __le32 ctio_exchange_addr =
+ ((const struct ctio7_from_24xx *)ctio)->
+ exchange_address;
+ __le32 cmd_exchange_addr =
+ ((struct qla_tgt_cmd *)cmd)->
+ atio.u.isp24.exchange_addr;
+
+ BUILD_BUG_ON(offsetof(struct ctio7_from_24xx,
+ exchange_address) !=
+ offsetof(struct ctio_crc_from_fw,
+ exchange_address));
+
+ if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe088,
+ "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
+ vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h);
+ return NULL;
+ }
+ break;
+ }
+
+ case TYPE_TGT_TMCMD: {
+ __le32 ctio_exchange_addr =
+ ((const struct abts_resp_from_24xx_fw *)ctio)->
+ exchange_address;
+ __le32 cmd_exchange_addr =
+ ((struct qla_tgt_mgmt_cmd *)cmd)->
+ orig_iocb.abts.exchange_address;
+
+ if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe089,
+ "qla_target(%d): ABTS: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
+ vha->vp_idx, h);
+ return NULL;
+ }
+ break;
+ }
+ }
+
+ req->outstanding_cmds[h] = NULL;
return cmd;
}
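The reworked lookup treats the completion handle purely as a hint: before detaching the table entry it cross-checks the command type and the exchange address, guarding against firmware that returns the same CTIO more than once. A generic sketch of this defensive completion lookup (illustrative types and names):

    #include <linux/types.h>

    struct handle_entry {
            unsigned int type;
            unsigned int xchg_addr;
    };

    static struct handle_entry *complete_lookup(struct handle_entry **table,
                                                size_t n, unsigned int h,
                                                unsigned int type,
                                                unsigned int xchg_addr)
    {
            struct handle_entry *e;

            if (h >= n || !(e = table[h]))
                    return NULL;    /* out-of-range or already-detached */
            if (e->type != type || e->xchg_addr != xchg_addr)
                    return NULL;    /* duplicate or mismatched completion */
            table[h] = NULL;        /* detach only after validation */
            return e;
    }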
@@ -3913,12 +4125,13 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
- struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, uint32_t status,
+ struct ctio7_from_24xx *ctio)
{
struct qla_hw_data *ha = vha->hw;
- struct se_cmd *se_cmd;
struct qla_tgt_cmd *cmd;
struct qla_qpair *qpair = rsp->qpair;
+ uint16_t ctio_flags;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
/* That could happen only in case of an error/reset/abort */
@@ -3930,45 +4143,92 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
return;
}
- cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
- if (cmd == NULL)
- return;
+ ctio_flags = le16_to_cpu(ctio->flags);
+
+ cmd = qlt_ctio_to_cmd(vha, rsp, handle, TYPE_TGT_CMD, ctio);
+ if (unlikely(cmd == NULL)) {
+ if ((handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE &&
+ (ctio_flags & 0xe1ff) == (CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE)) {
+ u32 tag = le32_to_cpu(ctio->exchange_address);
- if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
- cmd->sess) {
- qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
- (struct ctio7_from_24xx *)ctio);
+ if (status == CTIO_SUCCESS)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe083,
+ "qla_target(%d): tag %u: term exchange successful\n",
+ vha->vp_idx, tag);
+ else
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe084,
+ "qla_target(%d): tag %u: term exchange failed; status = 0x%x\n",
+ vha->vp_idx, tag, status);
+ }
+ return;
}
- se_cmd = &cmd->se_cmd;
+ if ((ctio_flags & CTIO7_FLAGS_DATA_OUT) && cmd->sess)
+ qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, ctio);
+
cmd->cmd_sent_to_fw = 0;
qlt_unmap_sg(vha, cmd);
if (unlikely(status != CTIO_SUCCESS)) {
+ u8 op = cmd->cdb ? cmd->cdb[0] : 0;
+ bool term_exchg = false;
+
+ /*
+ * If the hardware terminated the exchange, then we don't need
+ * to send an explicit term exchange message.
+ */
+ if (ctio_flags & OF_TERM_EXCH) {
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
+ }
+
switch (status & 0xFFFF) {
case CTIO_INVALID_RX_ID:
+ term_exchg = true;
if (printk_ratelimit())
dev_info(&vha->hw->pdev->dev,
- "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
- vha->vp_idx, cmd->atio.u.isp24.attr,
+ "qla_target(%d): tag %lld, op %x: CTIO with INVALID_RX_ID status 0x%x received (state %d, port %8phC, LUN %lld, ATIO attr %x, CTIO Flags %x|%x)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
+ status, cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun, cmd->atio.u.isp24.attr,
((cmd->ctio_flags >> 9) & 0xf),
cmd->ctio_flags);
-
break;
+
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
- /* driver request abort via Terminate exchange */
+ term_exchg = true;
+ fallthrough;
case CTIO_TIMEOUT:
- /* They are OK */
+ {
+ const char *status_str;
+
+ switch (status & 0xFFFF) {
+ case CTIO_LIP_RESET:
+ status_str = "LIP_RESET";
+ break;
+ case CTIO_TARGET_RESET:
+ status_str = "TARGET_RESET";
+ break;
+ case CTIO_ABORTED:
+ status_str = "ABORTED";
+ break;
+ case CTIO_TIMEOUT:
+ default:
+ status_str = "TIMEOUT";
+ break;
+ }
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
- "qla_target(%d): CTIO with "
- "status %#x received, state %x, se_cmd %p, "
- "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
- "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
- status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
+ status_str, status, cmd->state,
+ cmd->sess->port_name, cmd->unpacked_lun);
break;
+ }
case CTIO_PORT_LOGGED_OUT:
case CTIO_PORT_UNAVAILABLE:
@@ -3977,11 +4237,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
- "qla_target(%d): CTIO with %s status %x "
- "received (state %x, se_cmd %p)\n", vha->vp_idx,
+ "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
- status, cmd->state, se_cmd);
+ status, cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
+ term_exchg = true;
if (logged_out && cmd->sess) {
/*
* Session is already logged out, but we need
@@ -3996,18 +4258,30 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
}
break;
}
+
+ case CTIO_SRR_RECEIVED:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100e,
+ "qla_target(%d): tag %lld, op %x: CTIO with SRR status 0x%x received (state %d, port %8phC, LUN %lld, bufflen %d)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun, cmd->bufflen);
+
+ if (qlt_prepare_srr_ctio(qpair, cmd) == 0)
+ return;
+ break;
+
case CTIO_DIF_ERROR: {
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
- "qla_target(%d): CTIO with DIF_ERROR status %x "
- "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
- "expect_dif[0x%llx]\n",
- vha->vp_idx, status, cmd->state, se_cmd,
+ "qla_target(%d): tag %lld, op %x: CTIO with DIF_ERROR status 0x%x received (state %d, port %8phC, LUN %lld, actual_dif[0x%llx] expect_dif[0x%llx])\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- qlt_handle_dif_error(qpair, cmd, ctio);
+ qlt_handle_dif_error(qpair, cmd, crc);
return;
}
@@ -4016,51 +4290,72 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
case CTIO_FAST_INVALID_REQ:
case CTIO_FAST_SPI_ERR:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
- "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
- vha->vp_idx, status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with EDIF error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
break;
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
- "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
- vha->vp_idx, status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
break;
}
+ cmd->trc_flags |= TRC_CTIO_ERR;
- /* "cmd->aborted" means
- * cmd is already aborted/terminated, we don't
- * need to terminate again. The exchange is already
- * cleaned up/freed at FW level. Just cleanup at driver
- * level.
+ /*
+ * In state QLA_TGT_STATE_NEED_DATA the failed CTIO was for
+ * Data-Out, so either abort the exchange or try sending check
+ * condition with sense data depending on the severity of
+ * the error. In state QLA_TGT_STATE_PROCESSED the failed CTIO
+ * was for status (and possibly Data-In), so don't try sending
+ * an error status again in that case (if the error was for
+ * Data-In with status, we could try sending status without
+ * Data-In, but we don't do that currently).
*/
- if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
- (!cmd->aborted)) {
- cmd->trc_flags |= TRC_CTIO_ERR;
- if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
- return;
- }
+ if (!cmd->sent_term_exchg &&
+ (term_exchg || cmd->state != QLA_TGT_STATE_NEED_DATA))
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
+
+ if (unlikely(cmd->srr != NULL)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11031,
+ "qla_target(%d): tag %lld, op %x: expected CTIO with SRR status; got status 0x%x: state %d, bufflen %d\n",
+ vha->vp_idx, cmd->se_cmd.tag,
+ cmd->cdb ? cmd->cdb[0] : 0, status, cmd->state,
+ cmd->bufflen);
+ qlt_srr_abort(cmd, true);
}
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
cmd->trc_flags |= TRC_CTIO_DONE;
+
+ if (likely(status == CTIO_SUCCESS))
+ cmd->rsp_sent = 1;
+
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->state = QLA_TGT_STATE_DATA_IN;
if (status == CTIO_SUCCESS)
cmd->write_data_transferred = 1;
+ cmd->jiffies_at_hw_st_entry = 0;
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->aborted) {
cmd->trc_flags |= TRC_CTIO_ABORTED;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
- "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
+ "qla_target(%d): tag %lld: Aborted command finished\n",
+ vha->vp_idx, cmd->se_cmd.tag);
} else {
cmd->trc_flags |= TRC_CTIO_STRANGE;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
- "qla_target(%d): A command in state (%d) should "
- "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+ "qla_target(%d): tag %lld: A command in state (%d) should not return a CTIO complete\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
}
if (unlikely(status != CTIO_SUCCESS) &&
@@ -4113,7 +4408,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
- unsigned char *cdb;
unsigned long flags;
uint32_t data_length;
int ret, fcp_task_attr, data_dir, bidi = 0;
@@ -4129,8 +4423,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
goto out_term;
}
- spin_lock_init(&cmd->cmd_lock);
- cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
if (atio->u.isp24.fcp_cmnd.rddata &&
@@ -4148,7 +4440,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
atio->u.isp24.fcp_cmnd.task_attr);
data_length = get_datalen_for_atio(atio);
- ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cmd->cdb, data_length,
fcp_task_attr, data_dir, bidi);
if (ret != 0)
goto out_term;
@@ -4166,9 +4458,14 @@ out_term:
*/
cmd->trc_flags |= TRC_DO_WORK_ERR;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1);
qlt_decr_num_pend_cmds(vha);
+ if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
+ kfree(cmd->cdb);
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+ }
cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -4292,18 +4589,43 @@ out:
cmd->se_cmd.cpuid = h->cpuid;
}
+/*
+ * Safely make a fixed-length copy of a variable-length atio by truncating the
+ * CDB if necessary.
+ */
+static void memcpy_atio(struct atio_from_isp *dst,
+ const struct atio_from_isp *src)
+{
+ int len;
+
+ memcpy(dst, src, sizeof(*dst));
+
+ /*
+ * If the CDB was truncated, prevent get_datalen_for_atio() from
+ * accessing invalid memory.
+ */
+ len = src->u.isp24.fcp_cmnd.add_cdb_len;
+ if (unlikely(len != 0)) {
+ dst->u.isp24.fcp_cmnd.add_cdb_len = 0;
+ memcpy(&dst->u.isp24.fcp_cmnd.add_cdb[0],
+ &src->u.isp24.fcp_cmnd.add_cdb[len * 4],
+ 4);
+ }
+}
+
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
{
struct qla_tgt_cmd *cmd;
+ int add_cdb_len;
cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
if (!cmd)
return NULL;
cmd->cmd_type = TYPE_TGT_CMD;
- memcpy(&cmd->atio, atio, sizeof(*atio));
+ memcpy_atio(&cmd->atio, atio);
INIT_LIST_HEAD(&cmd->sess_cmd_list);
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -4323,6 +4645,29 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->vp_idx = vha->vp_idx;
cmd->edif = sess->edif.enable;
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+
+ /*
+ * NOTE: memcpy_atio() set cmd->atio.u.isp24.fcp_cmnd.add_cdb_len to 0,
+ * so use the original value here.
+ */
+ add_cdb_len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+ if (unlikely(add_cdb_len != 0)) {
+ int cdb_len = 16 + add_cdb_len * 4;
+ u8 *cdb;
+
+ cdb = kmalloc(cdb_len, GFP_ATOMIC);
+ if (unlikely(!cdb)) {
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ return NULL;
+ }
+ /* CAUTION: copy the CDB from atio, not cmd->atio */
+ memcpy(cdb, atio->u.isp24.fcp_cmnd.cdb, cdb_len);
+ cmd->cdb = cdb;
+ cmd->cdb_len = cdb_len;
+ }
+
return cmd;
}
@@ -4900,6 +5245,863 @@ out:
}
/*
+ * Return true if the HBA firmware version is known to have bugs that
+ * prevent Sequence Level Error Recovery (SLER) / Sequence Retransmission
+ * Request (SRR) from working.
+ *
+ * Some bad versions are based on testing and some are based on "Marvell Fibre
+ * Channel Firmware Release Notes".
+ */
+static bool qlt_has_sler_fw_bug(struct qla_hw_data *ha)
+{
+ bool has_sler_fw_bug = false;
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ /*
+ * In the fw release notes:
+ * ER147301 was added to v9.05.00 causing SLER regressions
+ * FCD-259 was fixed in v9.08.00
+ * FCD-371 was fixed in v9.08.00
+ * FCD-1183 was fixed in v9.09.00
+ *
+ * QLE2694L (ISP2071) known bad firmware (tested):
+ * 9.06.02
+ * 9.07.00
+ * 9.08.02
+ * SRRs trigger hundreds of bogus entries in the response
+ * queue and various other problems.
+ *
+ * QLE2694L known good firmware (tested):
+ * 8.08.05
+ * 9.09.00
+ *
+ * Suspected bad firmware (not confirmed by testing):
+ * v9.05.xx
+ *
+ * unknown firmware:
+ * 9.00.00 - 9.04.xx
+ */
+ if (ha->fw_major_version == 9 &&
+ ha->fw_minor_version >= 5 &&
+ ha->fw_minor_version <= 8)
+ has_sler_fw_bug = true;
+ }
+
+ return has_sler_fw_bug;
+}
+
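Presumably the SRR paths consult this predicate before honoring retransmission requests; a hedged usage sketch (the wrapper name is illustrative):

    /* Illustrative: refuse SRR handling on firmware with known SLER bugs. */
    static bool example_srr_supported(struct qla_hw_data *ha)
    {
            return !qlt_has_sler_fw_bug(ha);
    }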
+/*
+ * Return true and print a message if the HA has been reset since the SRR
+ * immediate notify was received; else return false.
+ */
+static bool qlt_srr_is_chip_reset(struct scsi_qla_host *vha,
+ struct qla_qpair *qpair, struct qla_tgt_srr *srr)
+{
+ if (!vha->flags.online ||
+ !qpair->fw_started ||
+ srr->reset_count != qpair->chip_reset) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100d,
+ "qla_target(%d): chip reset; discarding IMM SRR\n",
+ vha->vp_idx);
+ return true;
+ }
+ return false;
+}
+
+/* Find and return the command associated with an SRR immediate notify. */
+static struct qla_tgt_cmd *qlt_srr_to_cmd(struct scsi_qla_host *vha,
+ const struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct fc_port *sess;
+ struct qla_tgt_cmd *cmd;
+ uint32_t tag = le32_to_cpu(iocb->u.isp24.exchange_address);
+ uint16_t loop_id;
+ be_id_t s_id;
+ unsigned long flags;
+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11009,
+ "qla_target(%d): IMM SRR with unknown exchange address; reject SRR\n",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ s_id.domain = iocb->u.isp24.port_id[2];
+ s_id.area = iocb->u.isp24.port_id[1];
+ s_id.al_pa = iocb->u.isp24.port_id[0];
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess)
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ if (!sess || sess->deleted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100a,
+ "qla_target(%d): could not find session for IMM SRR; reject SRR\n",
+ vha->vp_idx);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return NULL;
+ }
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, tag);
+ if (!cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100b,
+ "qla_target(%d): could not find cmd for IMM SRR; reject SRR\n",
+ vha->vp_idx);
+ } else {
+ u16 srr_ox_id = le16_to_cpu(iocb->u.isp24.srr_ox_id);
+ u16 cmd_ox_id = be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ if (srr_ox_id != cmd_ox_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100c,
+ "qla_target(%d): tag %lld: IMM SRR: srr_ox_id[%04x] != cmd_ox_id[%04x]; reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag,
+ srr_ox_id, cmd_ox_id);
+ cmd = NULL;
+ }
+ }
+
+ return cmd;
+}
+
+/*
+ * Handle an immediate notify SRR (Sequence Retransmission Request) message from
+ * the hardware. The hardware will also send a CTIO with CTIO_SRR_RECEIVED status
+ * for the affected command.
+ *
+ * This may be called a second time for the same immediate notify SRR if
+ * CTIO_SRR_RECEIVED is never received and qlt_srr_abort() is called.
+ *
+ * Process context, no locks
+ */
+static void qlt_handle_srr_imm(struct scsi_qla_host *vha,
+ struct qla_tgt_srr *srr)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair;
+ struct qla_tgt_cmd *cmd;
+ uint8_t srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL;
+
+ /* handle qlt_srr_abort() */
+ if (srr->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11004,
+ "qla_target(%d): IMM SRR: terminating SRR for aborted cmd\n",
+ vha->vp_idx);
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+ kfree(srr);
+ return;
+ }
+ if (srr->reject) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
+ "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
+ vha->vp_idx);
+ goto out_reject;
+ }
+
+ /* Find the command associated with the SRR. */
+ cmd = qlt_srr_to_cmd(vha, &srr->imm_ntfy);
+ if (cmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
+ "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
+ vha->vp_idx);
+ srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID;
+ goto out_reject;
+ }
+
+ if (ha->tgt.tgt_ops->get_cmd_ref(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11038,
+ "qla_target(%d): IMM SRR: unable to get cmd ref; rejecting SRR\n",
+ vha->vp_idx);
+ cmd = NULL;
+ goto out_reject;
+ }
+
+ qpair = cmd->qpair;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+
+ if (cmd->reset_count != srr->reset_count) {
+ /* force a miscompare */
+ srr->reset_count = qpair->chip_reset ^ 1;
+ }
+ if (qlt_srr_is_chip_reset(vha, qpair, srr)) {
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ kfree(srr);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11001,
+ "qla_target(%d): tag %lld, op %x: received IMM SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->cdb ? cmd->cdb[0] : 0);
+
+ cmd->trc_flags |= TRC_SRR_IMM;
+
+ if (cmd->srr != NULL) {
+ if (cmd->srr->imm_ntfy_recvd) {
+ /*
+ * Received another immediate notify SRR message for
+ * this command before the previous one could be processed
+ * (not expected to happen).
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11006,
+ "qla_target(%d): tag %lld: received multiple IMM SRR; reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ goto out_reject;
+ }
+
+ /* qlt_prepare_srr_ctio() was called first. */
+ WARN_ON(!cmd->srr->ctio_recvd);
+
+ /*
+ * The immediate notify and CTIO handlers both allocated
+ * separate srr structs; combine them.
+ */
+ memcpy(&cmd->srr->imm_ntfy, &srr->imm_ntfy,
+ sizeof(srr->imm_ntfy));
+ kfree(srr);
+ srr = cmd->srr;
+ srr->imm_ntfy_recvd = true;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11002,
+ "qla_target(%d): tag %lld: schedule SRR work\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ /* Schedule the srr for processing in qlt_handle_srr(). */
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ /*
+ * Already running the work function; no need to schedule
+ * tgt->srr_work.
+ */
+ spin_unlock(&tgt->srr_lock);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ /* return with cmd refcount incremented */
+ return;
+ }
+
+ /* The CTIO SRR for this command has not yet been received. */
+
+ if (cmd->sent_term_exchg) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11007,
+ "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+ kfree(srr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ return;
+ }
+
+ /* If not expecting a CTIO, then reject IMM SRR. */
+ if (!cmd->cmd_sent_to_fw) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11008,
+ "qla_target(%d): tag %lld: IMM SRR but !cmd_sent_to_fw (state %d); reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ goto out_reject;
+ }
+
+ /* Expect qlt_prepare_srr_ctio() to be called. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11003,
+ "qla_target(%d): tag %lld: wait for CTIO SRR (state %d)\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
+ srr->cmd = cmd;
+ cmd->srr = srr;
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ return;
+
+out_reject:
+ qpair = vha->hw->base_qpair;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ if (!qlt_srr_is_chip_reset(vha, qpair, srr))
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ srr_explain);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ kfree(srr);
+}
+
+/*
+ * Queue an immediate notify SRR (Sequence Retransmission Request) message from
+ * the hardware for deferred processing. The hardware will also send a CTIO with
+ * CTIO_SRR_RECEIVED status for the affected command.
+ *
+ * ha->hardware_lock supposed to be held on entry
+ */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr *srr;
+
+ ql_log(ql_log_warn, vha, 0x11000, "qla_target(%d): received IMM SRR\n",
+ vha->vp_idx);
+
+ /*
+ * Need cmd->qpair->qp_lock_ptr, but have ha->hardware_lock. Defer
+ * processing to a workqueue so that the right lock can be acquired
+ * safely.
+ */
+
+ srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
+ if (!srr)
+ goto out_reject;
+
+ memcpy(&srr->imm_ntfy, iocb, sizeof(srr->imm_ntfy));
+ srr->imm_ntfy_recvd = true;
+ srr->reset_count = vha->hw->base_qpair->chip_reset;
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work(qla_tgt_wq, &tgt->srr_work);
+ spin_unlock(&tgt->srr_lock);
+ /* resume processing in qlt_handle_srr_imm() */
+ return;
+
+out_reject:
+ qlt_send_notify_ack(vha->hw->base_qpair, iocb, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * If possible, undo the effect of qlt_set_data_offset() and restore the cmd
+ * data buffer back to its full size.
+ */
+static int qlt_restore_orig_sg(struct qla_tgt_cmd *cmd)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ WARN_ON(cmd->sg_mapped);
+
+ if (cmd->offset == 0) {
+ /* qlt_set_data_offset() has not been called. */
+ return 0;
+ }
+
+ if (se_cmd->t_data_sg == NULL ||
+ se_cmd->t_data_nents == 0 ||
+ se_cmd->data_length == 0) {
+ /* The original scatterlist is not available. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102c,
+ "qla_target(%d): tag %lld: cannot restore original cmd buffer; keep modified buffer at offset %d\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
+ return -ENOENT;
+ }
+
+ /* Restore the original scatterlist. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102d,
+ "qla_target(%d): tag %lld: restore original cmd buffer: offset %d -> 0\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
+ if (cmd->free_sg) {
+ cmd->free_sg = 0;
+ qlt_free_sg(cmd);
+ }
+ cmd->offset = 0;
+ cmd->sg = se_cmd->t_data_sg;
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->bufflen = se_cmd->data_length;
+ return 0;
+}
+
+/*
+ * Adjust the data buffer of the given command to skip over offset bytes from
+ * the beginning while also reducing the length by offset bytes.
+ *
+ * This may be called multiple times for a single command if there are multiple
+ * SRRs, with each call reducing the buffer size further relative to the
+ * previous call. Note that the buffer may be reset back to its original size
+ * by calling qlt_restore_orig_sg().
+ */
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct scatterlist *sg_srr_start = NULL, *sg;
+ uint32_t first_offset = offset;
+ int sg_srr_cnt, i;
+ int bufflen = 0;
+
+ WARN_ON(cmd->sg_mapped);
+
+ ql_dbg(ql_dbg_tgt, vha, 0x11020,
+ "qla_target(%d): tag %lld: %s: sg %p sg_cnt %d dir %d cmd->offset %d cmd->bufflen %d add offset %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->sg,
+ cmd->sg_cnt, cmd->dma_data_direction, cmd->offset, cmd->bufflen,
+ offset);
+
+ if (cmd->se_cmd.prot_op != TARGET_PROT_NORMAL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11021,
+ "qla_target(%d): tag %lld: %s: SRR with protection information at nonzero offset not implemented\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ return -EINVAL;
+ }
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11022,
+ "qla_target(%d): tag %lld: %s: Missing cmd->sg or zero cmd->sg_cnt\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Walk the current cmd->sg list until we locate the new sg_srr_start
+ */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ ql_dbg(ql_dbg_tgt, vha, 0x11023,
+ "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
+
+ if (first_offset < sg->length) {
+ sg_srr_start = sg;
+ break;
+ }
+ first_offset -= sg->length;
+ }
+
+ if (!sg_srr_start) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11024,
+ "qla_target(%d): tag %lld: Unable to locate sg_srr_start for offset: %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, offset);
+ return -EINVAL;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11025,
+ "qla_target(%d): tag %lld: prepare SRR sgl at sg index %d of %d byte offset %u of %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, i, cmd->sg_cnt,
+ first_offset, sg_srr_start->length);
+
+ sg_srr_cnt = cmd->sg_cnt - i;
+
+ if (first_offset == 0 && !cmd->free_sg) {
+ /*
+ * The offset points to the beginning of a scatterlist element.
+ * In this case there is no need to modify the first scatterlist
+ * element, so we can just point directly inside the original
+ * unmodified scatterlist.
+ */
+ ql_dbg(ql_dbg_tgt, vha, 0x11026, "point directly to old sgl\n");
+ cmd->sg = sg_srr_start;
+ } else {
+ /*
+ * Allocate at most 2 new scatterlist elements to reduce memory
+ * requirements.
+ */
+ int n_alloc_sg = min(sg_srr_cnt, 2);
+ struct scatterlist *sg_srr =
+ kmalloc_array(n_alloc_sg, sizeof(*sg_srr), GFP_ATOMIC);
+ if (!sg_srr) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11027,
+ "qla_target(%d): tag %lld: Unable to allocate SRR scatterlist\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ return -ENOMEM;
+ }
+ sg_init_table(sg_srr, n_alloc_sg);
+
+ /* Init the first sg element to skip over the unneeded data. */
+ sg_set_page(&sg_srr[0], sg_page(sg_srr_start),
+ sg_srr_start->length - first_offset,
+ sg_srr_start->offset + first_offset);
+ if (sg_srr_cnt == 1) {
+ ql_dbg(ql_dbg_tgt, vha, 0x11028,
+ "single-element array\n");
+ } else if (sg_srr_cnt == 2) {
+ /* Only two elements; copy the last element. */
+ ql_dbg(ql_dbg_tgt, vha, 0x11029,
+ "complete two-element array\n");
+ sg = sg_next(sg_srr_start);
+ sg_set_page(&sg_srr[1], sg_page(sg), sg->length,
+ sg->offset);
+ } else {
+ /*
+ * Three or more elements; chain our newly-allocated
+ * 2-entry array to the rest of the original
+ * scatterlist at the splice point.
+ */
+ ql_dbg(ql_dbg_tgt, vha, 0x1102a,
+ "chain to original scatterlist\n");
+ sg = sg_next(sg_srr_start);
+ sg_chain(sg_srr, 2, sg);
+ }
+
+ /*
+ * If the previous scatterlist was allocated here on a previous
+ * call, then it should be safe to free now.
+ */
+ if (cmd->free_sg)
+ qlt_free_sg(cmd);
+ cmd->sg = sg_srr;
+ cmd->free_sg = 1;
+ }
+
+ /* Note that sg_cnt doesn't include any extra chain elements. */
+ cmd->sg_cnt = sg_srr_cnt;
+ cmd->offset += offset;
+ cmd->bufflen -= offset;
+
+ /* Check the scatterlist length for consistency. */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ bufflen += sg->length;
+ }
+ if (bufflen != cmd->bufflen) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102b,
+ "qla_target(%d): tag %lld: %s: bad sgl length: expected %d got %d\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->bufflen, bufflen);
+ return -EINVAL;
+ }
+
+ return 0;
+}
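A concrete walk-through of the splice arithmetic may help; the segment sizes below are invented for illustration:

	/*
	 * Example: cmd->sg has three 4096-byte segments (bufflen 12288)
	 * and qlt_set_data_offset(cmd, 6144) is called.
	 *
	 *   sg[0]: length 4096 -> 6144 >= 4096, first_offset becomes 2048
	 *   sg[1]: length 4096 -> 2048 < 4096, so sg_srr_start = sg[1], i = 1
	 *
	 * sg_srr_cnt = 3 - 1 = 2, so a two-element array is allocated:
	 *
	 *   sg_srr[0]: page of sg[1], length 4096 - 2048 = 2048,
	 *              offset sg[1].offset + 2048
	 *   sg_srr[1]: copy of sg[2]
	 *
	 * Afterwards cmd->sg_cnt == 2, cmd->offset has grown by 6144 and
	 * cmd->bufflen has shrunk from 12288 to 6144, which the
	 * consistency loop at the end verifies against the new
	 * scatterlist.
	 */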
+
+/*
+ * Given the "SRR relative offset" (offset of data to retry), determine what
+ * needs to be retransmitted (data and/or status) and return the mask in
+ * xmit_type. If retrying data, adjust the command buffer so that it covers
+ * only the data that needs to be retried, skipping over the data that has
+ * already been transferred successfully.
+ *
+ * Returns 0 for success or a negative error number.
+ */
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+ uint32_t srr_rel_offs, int *xmit_type)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ int res = 0, rel_offs;
+
+ if (srr_rel_offs < cmd->offset ||
+ srr_rel_offs > cmd->offset + cmd->bufflen) {
+ *xmit_type = 0;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101e,
+ "qla_target(%d): tag %lld: srr_rel_offs %u outside accepted range %u - %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, srr_rel_offs,
+ cmd->offset, cmd->offset + cmd->bufflen);
+ return -EINVAL;
+ }
+
+ /*
+ * srr_rel_offs is the offset of the data we need from the beginning of
+ * the *original* buffer.
+ *
+ * cmd->offset is the offset of the current cmd scatterlist from the
+ * beginning of the *original* buffer, which might be nonzero if there
+ * was a previous SRR and the buffer could not be reset back to its
+ * original size.
+ *
+ * rel_offs is the offset of the data we need from the beginning of the
+ * current cmd scatterlist.
+ */
+ rel_offs = srr_rel_offs - cmd->offset;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101f,
+ "qla_target(%d): tag %lld: current buffer [%u - %u); srr_rel_offs=%d, rel_offs=%d\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset,
+ cmd->offset + cmd->bufflen, srr_rel_offs, rel_offs);
+
+ *xmit_type = QLA_TGT_XMIT_ALL;
+
+ if (rel_offs == cmd->bufflen)
+ *xmit_type = QLA_TGT_XMIT_STATUS;
+ else if (rel_offs > 0)
+ res = qlt_set_data_offset(cmd, rel_offs);
+
+ return res;
+}
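The three offsets involved are easy to confuse; a worked example with invented values:

	/*
	 * Example: the initiator requests a retry at srr_rel_offs = 12288
	 * into the original transfer. If an earlier SRR already trimmed
	 * the buffer so that cmd->offset = 8192 and cmd->bufflen = 57344,
	 * then rel_offs = 12288 - 8192 = 4096 and
	 * qlt_set_data_offset(cmd, 4096) trims another 4096 bytes.
	 *
	 * Two boundary cases: rel_offs == cmd->bufflen means all data
	 * arrived and only the status is resent (QLA_TGT_XMIT_STATUS);
	 * rel_offs == 0 keeps the current buffer and retransmits
	 * everything (QLA_TGT_XMIT_ALL).
	 */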
+
+/*
+ * Process a SRR (Sequence Retransmission Request) for a SCSI command once both
+ * the immediate notify SRR and CTIO SRR have been received from the hw.
+ *
+ * Process context, no locks
+ */
+static void qlt_handle_srr(struct scsi_qla_host *vha, struct qla_tgt_srr *srr)
+{
+ struct qla_tgt_cmd *cmd = srr->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct qla_qpair *qpair = cmd->qpair;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t op = cmd->cdb ? cmd->cdb[0] : 0;
+ uint32_t srr_rel_offs = le32_to_cpu(srr->imm_ntfy.u.isp24.srr_rel_offs);
+ uint16_t srr_ui = le16_to_cpu(srr->imm_ntfy.u.isp24.srr_ui);
+ int xmit_type = 0;
+ bool xmit_response = false;
+ bool rdy_to_xfer = false;
+ bool did_timeout;
+ bool send_term_exch = false;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+
+ WARN_ON(cmd->cmd_sent_to_fw);
+
+ cmd->srr = NULL;
+
+ if (qlt_srr_is_chip_reset(vha, qpair, srr))
+ goto out_advance_cmd;
+
+ if (cmd->sent_term_exchg || cmd->sess->deleted || srr->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11010,
+ "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+
+ send_term_exch = true;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+
+ if (srr->reject)
+ goto out_reject;
+
+ /*
+ * If we receive multiple SRRs for the same command, place a time limit
+ * on how long we are willing to retry. This timeout should be less
+ * than SQA_MAX_HW_PENDING_TIME in scst_qla2xxx.c.
+ */
+ did_timeout = time_is_before_jiffies64((cmd->jiffies_at_hw_st_entry ? :
+ cmd->jiffies_at_alloc) + 30 * HZ);
+
+ qlt_restore_orig_sg(cmd);
+
+ switch (srr_ui) {
+ case SRR_IU_STATUS:
+ if (cmd->state != QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11011,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, op,
+ cmd->state);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11033,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11012,
+ "qla_target(%d): tag %lld, op %x: accept SRR_IU_STATUS and retransmit scsi_status=%x\n",
+ vha->vp_idx, se_cmd->tag, op,
+ se_cmd->scsi_status);
+ xmit_type = QLA_TGT_XMIT_STATUS;
+ xmit_response = true;
+ cmd->trc_flags |= TRC_SRR_RSP;
+ break;
+
+ case SRR_IU_DATA_IN:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11013,
+ "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_IN: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d, scsi_status=%x\n",
+ vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
+ cmd->sg_cnt, cmd->offset, srr_rel_offs,
+ se_cmd->scsi_status);
+
+ if (cmd->state != QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11014,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, cmd->state);
+ goto out_reject;
+ }
+
+ /*
+ * QLA_TGT_STATE_PROCESSED does not necessarily imply data-in
+ */
+ if (!qlt_has_data(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11015,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because cmd has no data to send\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11016,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because buffer is missing\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11034,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_IN due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
+ goto out_reject;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11017,
+ "qla_target(%d): tag %lld: accept SRR_IU_DATA_IN and retransmit data: bufflen=%d, offset=%d\n",
+ vha->vp_idx, se_cmd->tag, cmd->bufflen,
+ cmd->offset);
+ xmit_response = true;
+ cmd->trc_flags |= TRC_SRR_RSP;
+ break;
+
+ case SRR_IU_DATA_OUT:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11018,
+ "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_OUT: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d\n",
+ vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
+ cmd->sg_cnt, cmd->offset, srr_rel_offs);
+
+ if (cmd->state != QLA_TGT_STATE_NEED_DATA) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11019,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, cmd->state);
+ goto out_reject;
+ }
+
+ /*
+ * QLA_TGT_STATE_NEED_DATA implies there should be data-out
+ */
+ if (!qlt_has_data(cmd) || !cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101a,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT because buffer is missing\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11035,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_OUT due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
+ goto out_reject;
+
+ if (!(xmit_type & QLA_TGT_XMIT_DATA)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101b,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT: bad offset\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101c,
+ "qla_target(%d): tag %lld: accept SRR_IU_DATA_OUT and receive data again: bufflen=%d, offset=%d\n",
+ vha->vp_idx, se_cmd->tag, cmd->bufflen,
+ cmd->offset);
+ cmd->trc_flags |= TRC_SRR_XRDY;
+ rdy_to_xfer = true;
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101d,
+ "qla_target(%d): tag %lld, op %x: reject unknown srr_ui value 0x%x: state=%d, bufflen=%d, offset=%d, srr_offset=%d\n",
+ vha->vp_idx, se_cmd->tag, op, srr_ui, cmd->state,
+ cmd->bufflen, cmd->offset, srr_rel_offs);
+ goto out_reject;
+ }
+
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ if (xmit_response) {
+ /* For status and data-in, retransmit the response. */
+ if (qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status)) {
+ send_term_exch = true;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+ } else if (rdy_to_xfer) {
+ /* For data-out, receive data again. */
+ if (qlt_rdy_to_xfer(cmd)) {
+ send_term_exch = true;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+ }
+
+ return;
+
+out_reject:
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+out_advance_cmd:
+ if (!cmd->sent_term_exchg &&
+ (send_term_exch || cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+ !qlt_srr_is_chip_reset(vha, qpair, srr)) {
+ cmd->trc_flags |= TRC_SRR_TERM;
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ /*
+ * The initiator should abort the command, but if not, try to
+ * return an error.
+ */
+ cmd->srr_failed = 1;
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
+ } else {
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ }
+ spin_unlock_irq(qpair->qp_lock_ptr);
+}
+
+/* Workqueue function for processing SRR work in process context. */
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+ struct scsi_qla_host *vha = tgt->vha;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11032,
+ "qla_target(%d): Entering SRR work\n", vha->vp_idx);
+
+ for (;;) {
+ struct qla_tgt_srr *srr;
+
+ spin_lock_irq(&tgt->srr_lock);
+ srr = list_first_entry_or_null(&tgt->srr_list, typeof(*srr),
+ srr_list_entry);
+ if (!srr) {
+ spin_unlock_irq(&tgt->srr_lock);
+ break;
+ }
+ list_del(&srr->srr_list_entry);
+ spin_unlock_irq(&tgt->srr_lock);
+
+ if (!srr->cmd) {
+ qlt_handle_srr_imm(vha, srr);
+ } else {
+ qlt_handle_srr(vha, srr);
+ vha->hw->tgt.tgt_ops->put_cmd_ref(srr->cmd);
+ kfree(srr);
+ }
+ }
+}
+
+/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
@@ -5325,6 +6527,12 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
if (qlt_24xx_handle_els(vha, iocb) == 0)
send_notify_ack = 0;
break;
+
+ case IMM_NTFY_SRR:
+ qlt_prepare_srr_imm(vha, iocb);
+ send_notify_ack = 0;
+ break;
+
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
"qla_target(%d): Received unknown immediate "
@@ -5359,7 +6567,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, atio, 1);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
@@ -5469,13 +6677,15 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
qlt_incr_num_pend_cmds(vha);
INIT_LIST_HEAD(&cmd->cmd_list);
- memcpy(&cmd->atio, atio, sizeof(*atio));
+ memcpy_atio(&cmd->atio, atio);
cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
if (qfull) {
cmd->q_full = 1;
@@ -5588,7 +6798,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe05f,
"qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
- atio, 1, 0);
+ atio, 1);
break;
case -EBUSY:
ql_dbg(ql_dbg_tgt, vha, 0xe060,
@@ -5697,7 +6907,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_hw_data *ha = vha->hw;
- mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
+ mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, TYPE_TGT_TMCMD, pkt);
if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
ql_dbg(ql_dbg_async, vha, 0xe064,
"qla_target(%d): ABTS Comp without mcmd\n",
@@ -5717,7 +6927,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
le32_to_cpu(entry->error_subcode2) == 0) {
if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
return;
}
qlt_24xx_retry_term_exchange(vha, rsp->qpair,
@@ -5728,10 +6938,10 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
vha->vp_idx, entry->compl_status,
entry->error_subcode1,
entry->error_subcode2);
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
}
} else if (mcmd) {
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
}
}
@@ -5795,7 +7005,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe05f,
"qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
+ atio, 1);
break;
case -EBUSY:
ql_dbg(ql_dbg_tgt, vha, 0xe060,
@@ -5816,26 +7026,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
}
break;
- case CONTINUE_TGT_IO_TYPE:
- {
- struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-
- qlt_do_ctio_completion(vha, rsp, entry->handle,
- le16_to_cpu(entry->status)|(pkt->entry_status << 16),
- entry);
- break;
- }
-
- case CTIO_A64_TYPE:
- {
- struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-
- qlt_do_ctio_completion(vha, rsp, entry->handle,
- le16_to_cpu(entry->status)|(pkt->entry_status << 16),
- entry);
- break;
- }
-
case IMMED_NOTIFY_TYPE:
ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
@@ -6323,6 +7513,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
+ spin_lock_init(&tgt->srr_lock);
+ INIT_LIST_HEAD(&tgt->srr_list);
+ INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
atomic_set(&tgt->tgt_global_resets_count, 0);
base_vha->vha_tgt.qla_tgt = tgt;
@@ -6705,7 +7898,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
adjust_corrupted_atio(pkt);
qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
- ha_locked, 0);
+ ha_locked);
} else {
qlt_24xx_atio_pkt_all_vps(vha,
(struct atio_from_isp *)pkt, ha_locked);
@@ -6971,6 +8164,32 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
}
+/* Update any settings that depend on ha->fw_*_version. */
+void
+qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ql2xtgt_tape_enable && qlt_has_sler_fw_bug(ha)) {
+ ql_log(ql_log_warn, vha, 0x11036,
+ "WARNING: ignoring ql2xtgt_tape_enable due to buggy HBA firmware; please upgrade FW\n");
+
+ /* Disable FC Tape support */
+ if (ha->isp_ops->nvram_config == qla81xx_nvram_config) {
+ struct init_cb_81xx *icb =
+ (struct init_cb_81xx *)ha->init_cb;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
+ } else {
+ struct init_cb_24xx *icb =
+ (struct init_cb_24xx *)ha->init_cb;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
+ }
+ }
+}
+
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
struct vp_config_entry_24xx *vpmod)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 15a59c125c53..61072fb41b29 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -184,6 +184,7 @@ struct nack_to_isp {
#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID 0x17
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
#define NOTIFY_ACK_SUCCESS 0x01
@@ -686,6 +687,8 @@ struct qla_tgt_func_tmpl {
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
struct qla_tgt_cmd *(*get_cmd)(struct fc_port *);
+ int (*get_cmd_ref)(struct qla_tgt_cmd *cmd);
+ void (*put_cmd_ref)(struct qla_tgt_cmd *cmd);
void (*rel_cmd)(struct qla_tgt_cmd *);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
@@ -754,6 +757,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_DONE 4 /* cmd being freed */
/* ATIO task_codes field */
#define ATIO_SIMPLE_QUEUE 0
@@ -822,18 +826,26 @@ struct qla_tgt {
int notify_ack_expected;
int abts_resp_expected;
int modify_lun_expected;
+
+ spinlock_t srr_lock;
+ struct list_head srr_list;
+ struct work_struct srr_work;
+
atomic_t tgt_global_resets_count;
+
struct list_head tgt_list_entry;
};
struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
uint32_t chip_reset;
- struct atio_from_isp atio;
struct work_struct work;
struct list_head cmd_list;
bool aborted;
struct rsp_que *rsp;
+
+ struct atio_from_isp atio;
+ /* DO NOT ADD ANYTHING ELSE HERE - atio must be last member */
};
enum trace_flags {
@@ -858,6 +870,7 @@ enum trace_flags {
TRC_DATA_IN = BIT_18,
TRC_ABORT = BIT_19,
TRC_DIF_ERR = BIT_20,
+ TRC_SRR_IMM = BIT_21,
};
struct qla_tgt_cmd {
@@ -876,25 +889,36 @@ struct qla_tgt_cmd {
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
- spinlock_t cmd_lock;
- /* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
+
+ /* Call qlt_free_sg() if set. */
+ unsigned int free_sg:1;
+
unsigned int write_data_transferred:1;
+
+ /* Set if the SCSI status was sent successfully. */
+ unsigned int rsp_sent:1;
+
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
unsigned int edif:1;
+ /* Set if a SRR was rejected. */
+ unsigned int srr_failed:1;
+
+ /* Set if the exchange has been terminated. */
+ unsigned int sent_term_exchg:1;
+
/*
- * This variable may be set from outside the LIO and I/O completion
- * callback functions. Do not declare this member variable as a
- * bitfield to avoid a read-modify-write operation when this variable
- * is set.
+ * Set if sent_term_exchg is set, or if the cmd was aborted by a TMR,
+ * or if some other error prevents normal processing of the command.
*/
- unsigned int aborted;
+ unsigned int aborted:1;
+ struct qla_tgt_srr *srr;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
int bufflen; /* cmd buffer length */
@@ -925,13 +949,23 @@ struct qla_tgt_cmd {
uint8_t scsi_status, sense_key, asc, ascq;
struct crc_context *ctx;
- const uint8_t *cdb;
+ uint8_t *cdb;
uint64_t lba;
+ int cdb_len;
uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
uint32_t a_ref_tag, e_ref_tag;
#define DIF_BUNDL_DMA_VALID 1
uint16_t prot_flags;
+ unsigned long jiffies_at_term_exchg;
+
+ /*
+ * jiffies64 when qlt_rdy_to_xfer() or qlt_xmit_response() first
+ * called, or 0 when not in those states. Used to limit the number of
+ * SRR retries.
+ */
+ uint64_t jiffies_at_hw_st_entry;
+
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
@@ -965,6 +999,7 @@ struct qla_tgt_mgmt_cmd {
unsigned int flags;
#define QLA24XX_MGMT_SEND_NACK BIT_0
#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
+#define QLA24XX_MGMT_LLD_OWNED BIT_2
uint32_t reset_count;
struct work_struct work;
uint64_t unpacked_lun;
@@ -993,6 +1028,45 @@ struct qla_tgt_prm {
uint16_t tot_dsds;
};
+/*
+ * SRR (Sequence Retransmission Request) - resend or re-receive some or all
+ * data or status to recover from a transient I/O error.
+ */
+struct qla_tgt_srr {
+ /*
+ * Copy of immediate notify SRR message received from hw; valid only if
+ * imm_ntfy_recvd is true.
+ */
+ struct imm_ntfy_from_isp imm_ntfy;
+
+ struct list_head srr_list_entry;
+
+ /* The command affected by this SRR, or NULL if not yet determined. */
+ struct qla_tgt_cmd *cmd;
+
+ /* Used to detect if the HBA has been reset since receiving the SRR. */
+ uint32_t reset_count;
+
+ /*
+ * The hardware sends two messages for each SRR - an immediate notify
+ * and a CTIO with CTIO_SRR_RECEIVED status. These keep track of which
+ * messages have been received. The SRR can be processed once both of
+ * these are true.
+ */
+ bool imm_ntfy_recvd;
+ bool ctio_recvd;
+
+ /*
+ * This is set to true if the affected command was aborted (cmd may be
+ * set to NULL), in which case the immediate notify exchange also needs
+ * to be aborted.
+ */
+ bool aborted;
+
+ /* This is set to true to force the SRR to be rejected. */
+ bool reject;
+};
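Because the two halves of a SRR can arrive in either order, a summary of the pairing rules implemented in qla_target.c may be useful here (descriptive only; the behavior is defined by the code above):

	/*
	 * SRR pairing summary:
	 *
	 *   IMM SRR first:  qlt_handle_srr_imm() attaches the srr to
	 *                   cmd->srr and waits for the CTIO half
	 *                   (ctio_recvd == false).
	 *   CTIO SRR first: qlt_prepare_srr_ctio() allocates its own srr;
	 *                   when the IMM half arrives,
	 *                   qlt_handle_srr_imm() merges the two and queues
	 *                   the result on tgt->srr_list.
	 *   Both received:  qlt_handle_srr() accepts or rejects the retry.
	 *   Abort or reset: the srr is discarded and the immediate notify
	 *                   exchange is terminated or NACKed.
	 */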
+
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
@@ -1048,6 +1122,20 @@ static inline uint32_t sid_to_key(const be_id_t s_id)
}
/*
+ * Free the scatterlist allocated by qlt_set_data_offset(). Call this only if
+ * cmd->free_sg is set.
+ */
+static inline void qlt_free_sg(struct qla_tgt_cmd *cmd)
+{
+ /*
+ * The scatterlist may be chained to the original scatterlist, but we
+ * only need to free the first segment here since that is the only part
+ * allocated by qlt_set_data_offset().
+ */
+ kfree(cmd->sg);
+}
+
+/*
 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
*/
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
@@ -1055,9 +1143,14 @@ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
extern int qlt_abort_cmd(struct qla_tgt_cmd *);
+void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject);
+void qlt_send_term_exchange(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
@@ -1073,6 +1166,7 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_81xx *);
extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_81xx *);
+void qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
struct vp_config_entry_24xx *);
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index ceaf1c7b1d17..2fff68935338 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -291,6 +291,16 @@ static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
return cmd;
}
+static int tcm_qla2xxx_get_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+ return target_get_sess_cmd(&cmd->se_cmd, true);
+}
+
+static void tcm_qla2xxx_put_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+ target_put_sess_cmd(&cmd->se_cmd);
+}
+
static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
{
target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
@@ -303,6 +313,8 @@ static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
+ cmd->state = QLA_TGT_STATE_DONE;
+
cmd->qpair->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
@@ -529,6 +541,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
+ else if (cmd->srr_failed)
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_SNACK_REJECTED);
else
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
@@ -1524,6 +1539,8 @@ static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.get_cmd = tcm_qla2xxx_get_cmd,
+ .get_cmd_ref = tcm_qla2xxx_get_cmd_ref,
+ .put_cmd_ref = tcm_qla2xxx_put_cmd_ref,
.rel_cmd = tcm_qla2xxx_rel_cmd,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 75125d2021f5..7febc0baa9d6 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1016,7 +1016,7 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
uint32_t crash_record_size = 0;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
/* Get size of crash record. */
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
@@ -1099,7 +1099,7 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
/* Get Connection Event Log. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
mbox_cmd[2] = LSDW(event_log_dma);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 9a0f467264b3..76cdad063f7b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -216,6 +216,9 @@ int scsi_device_max_queue_depth(struct scsi_device *sdev)
*/
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
+ if (!sdev->budget_map.map)
+ return -EINVAL;
+
depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
if (depth > 0) {
@@ -255,6 +258,8 @@ EXPORT_SYMBOL(scsi_change_queue_depth);
*/
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
+ if (!sdev->budget_map.map)
+ return 0;
/*
* Don't let QUEUE_FULLs on the same
@@ -826,8 +831,11 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
spin_lock_irqsave(shost->host_lock, flags);
while (list->next != &shost->__devices) {
next = list_entry(list->next, struct scsi_device, siblings);
- /* skip devices that we can't get a reference to */
- if (!scsi_device_get(next))
+ /*
+ * Skip pseudo devices and also devices we can't get a
+ * reference to.
+ */
+ if (!scsi_device_is_pseudo_dev(next) && !scsi_device_get(next))
break;
next = NULL;
list = list->next;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b2ab97be5db3..1f2a53ba5dd9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -230,6 +230,7 @@ struct tape_block {
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
+#define SDEBUG_OPT_UNALIGNED_WRITE 0x20000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
@@ -237,7 +238,8 @@ struct tape_block {
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
SDEBUG_OPT_SHORT_TRANSFER | \
SDEBUG_OPT_HOST_BUSY | \
- SDEBUG_OPT_CMD_ABORT)
+ SDEBUG_OPT_CMD_ABORT | \
+ SDEBUG_OPT_UNALIGNED_WRITE)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
@@ -2961,11 +2963,11 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
- unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
- arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ unsigned char *arr __free(kfree) = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+
if (!arr)
return -ENOMEM;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
@@ -4932,6 +4934,14 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
+ atomic_read(&sdeb_inject_pending))) {
+ atomic_set(&sdeb_inject_pending, 0);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
+ UNALIGNED_WRITE_ASCQ);
+ return check_condition_result;
+ }
+
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -6752,20 +6762,59 @@ static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
return false;
}
+struct sdebug_abort_cmd {
+ u32 unique_tag;
+};
+
+enum sdebug_internal_cmd_type {
+ SCSI_DEBUG_ABORT_CMD,
+};
+
+struct sdebug_internal_cmd {
+ enum sdebug_internal_cmd_type type;
+
+ union {
+ struct sdebug_abort_cmd abort_cmd;
+ };
+};
+
+union sdebug_priv {
+ struct sdebug_scsi_cmd cmd;
+ struct sdebug_internal_cmd internal_cmd;
+};
+
/*
- * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
+ * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
+ * command is allocated and submitted to trigger the reserved command
+ * infrastructure.
*/
static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
- struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(&sdsc->lock, flags);
- res = scsi_debug_stop_cmnd(cmnd);
- spin_unlock_irqrestore(&sdsc->lock, flags);
-
- return res;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct request *rq = scsi_cmd_to_rq(cmnd);
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ struct sdebug_internal_cmd *internal_cmd;
+ struct scsi_cmnd *abort_cmd;
+ struct request *abort_rq;
+ blk_status_t res;
+
+ abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
+ BLK_MQ_REQ_RESERVED);
+ if (!abort_cmd)
+ return false;
+ internal_cmd = scsi_cmd_priv(abort_cmd);
+ *internal_cmd = (struct sdebug_internal_cmd) {
+ .type = SCSI_DEBUG_ABORT_CMD,
+ .abort_cmd = {
+ .unique_tag = unique_tag,
+ },
+ };
+ abort_rq = scsi_cmd_to_rq(abort_cmd);
+ abort_rq->timeout = secs_to_jiffies(3);
+ res = blk_execute_rq(abort_rq, true);
+ scsi_put_internal_cmd(abort_cmd);
+ return res == BLK_STS_OK;
}
/*
@@ -9220,6 +9269,56 @@ out_handle:
return ret;
}
+/* Process @scp, a request to abort a SCSI command by tag. */
+static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
+{
+ struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+ struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
+ const u32 unique_tag = abort_cmd->unique_tag;
+ struct scsi_cmnd *to_be_aborted_scmd =
+ scsi_host_find_tag(shost, unique_tag);
+ struct sdebug_scsi_cmd *to_be_aborted_sdsc;
+ bool res = false;
+
+ if (!to_be_aborted_scmd) {
+ pr_err("%s: command with tag %#x not found\n", __func__,
+ unique_tag);
+ return;
+ }
+
+ /* Dereference the command private data only after the NULL check. */
+ to_be_aborted_sdsc = scsi_cmd_priv(to_be_aborted_scmd);
+
+ scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
+ res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
+
+ if (res)
+ pr_info("%s: aborted command with tag %#x\n",
+ __func__, unique_tag);
+ else
+ pr_err("%s: failed to abort command with tag %#x\n",
+ __func__, unique_tag);
+
+ set_host_byte(scp, res ? DID_OK : DID_ERROR);
+}
+
+static int scsi_debug_process_reserved_command(struct Scsi_Host *shost,
+ struct scsi_cmnd *scp)
+{
+ struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+
+ switch (internal_cmd->type) {
+ case SCSI_DEBUG_ABORT_CMD:
+ scsi_debug_abort_cmd(shost, scp);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ set_host_byte(scp, DID_ERROR);
+ break;
+ }
+
+ scsi_done(scp);
+ return 0;
+}
+
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -9420,6 +9519,9 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+ if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
+ return 0;
+
spin_lock_init(&sdsc->lock);
hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -9439,6 +9541,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.sdev_destroy = scsi_debug_sdev_destroy,
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
+ .queue_reserved_command = scsi_debug_process_reserved_command,
.change_queue_depth = sdebug_change_qdepth,
.map_queues = sdebug_map_queues,
.mq_poll = sdebug_blk_mq_poll,
@@ -9448,6 +9551,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.eh_bus_reset_handler = scsi_debug_bus_reset,
.eh_host_reset_handler = scsi_debug_host_reset,
.can_queue = SDEBUG_CANQUEUE,
+ .nr_reserved_cmds = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
.cmd_per_lun = DEF_CMD_PER_LUN,
@@ -9456,7 +9560,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.module = THIS_MODULE,
.skip_settle_delay = 1,
.track_queue_depth = 1,
- .cmd_size = sizeof(struct sdebug_scsi_cmd),
+ .cmd_size = sizeof(union sdebug_priv),
.init_cmd_priv = sdebug_init_cmd_priv,
.target_alloc = sdebug_target_alloc,
.target_destroy = sdebug_target_destroy,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c13812a3f03..f869108fd969 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -749,6 +749,9 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
const struct scsi_host_template *sht = sdev->host->hostt;
struct scsi_device *tmp_sdev;
+ if (!sdev->budget_map.map)
+ return;
+
if (!sht->track_queue_depth ||
sdev->queue_depth >= sdev->max_queue_depth)
return;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d7e42293b864..51ad2ad07e43 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -396,7 +396,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
- sbitmap_put(&sdev->budget_map, cmd->budget_token);
+ if (sdev->budget_map.map)
+ sbitmap_put(&sdev->budget_map, cmd->budget_token);
cmd->budget_token = -1;
}
@@ -1360,6 +1361,9 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
{
int token;
+ if (!sdev->budget_map.map)
+ return INT_MAX;
+
token = sbitmap_get(&sdev->budget_map);
if (token < 0)
return -1;
@@ -1530,6 +1534,14 @@ static void scsi_complete(struct request *rq)
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
enum scsi_disposition disposition;
+ if (blk_mq_is_reserved_rq(rq)) {
+ /* Only pass-through requests are supported in this code path. */
+ WARN_ON_ONCE(!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)));
+ scsi_mq_uninit_cmd(cmd);
+ __blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result));
+ return;
+ }
+
INIT_LIST_HEAD(&cmd->eh_entry);
atomic_inc(&cmd->device->iodone_cnt);
@@ -1749,7 +1761,8 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
struct scsi_device *sdev = q->queuedata;
- sbitmap_put(&sdev->budget_map, budget_token);
+ if (sdev->budget_map.map)
+ sbitmap_put(&sdev->budget_map, budget_token);
}
/*
@@ -1818,25 +1831,31 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(cmd->budget_token < 0);
/*
- * If the device is not in running state we will reject some or all
- * commands.
+ * Bypass the SCSI device, SCSI target and SCSI host checks for
+ * reserved commands.
*/
- if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
- ret = scsi_device_state_check(sdev, req);
- if (ret != BLK_STS_OK)
- goto out_put_budget;
- }
+ if (!blk_mq_is_reserved_rq(req)) {
+ /*
+ * If the device is not in running state we will reject some or
+ * all commands.
+ */
+ if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+ ret = scsi_device_state_check(sdev, req);
+ if (ret != BLK_STS_OK)
+ goto out_put_budget;
+ }
- ret = BLK_STS_RESOURCE;
- if (!scsi_target_queue_ready(shost, sdev))
- goto out_put_budget;
- if (unlikely(scsi_host_in_recovery(shost))) {
- if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
- ret = BLK_STS_OFFLINE;
- goto out_dec_target_busy;
+ ret = BLK_STS_RESOURCE;
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto out_put_budget;
+ if (unlikely(scsi_host_in_recovery(shost))) {
+ if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
+ ret = BLK_STS_OFFLINE;
+ goto out_dec_target_busy;
+ }
+ if (!scsi_host_queue_ready(q, shost, sdev, cmd))
+ goto out_dec_target_busy;
}
- if (!scsi_host_queue_ready(q, shost, sdev, cmd))
- goto out_dec_target_busy;
/*
* Only clear the driver-private command data if the LLD does not supply
@@ -1865,6 +1884,14 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
blk_mq_start_request(req);
+ if (blk_mq_is_reserved_rq(req)) {
+ reason = shost->hostt->queue_reserved_command(shost, cmd);
+ if (reason) {
+ ret = BLK_STS_RESOURCE;
+ goto out_put_budget;
+ }
+ return BLK_STS_OK;
+ }
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
@@ -2083,7 +2110,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->ops = &scsi_mq_ops_no_commit;
tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
tag_set->nr_maps = shost->nr_maps ? : 1;
- tag_set->queue_depth = shost->can_queue;
+ tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds;
+ tag_set->reserved_tags = shost->nr_reserved_cmds;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
if (shost->hostt->tag_alloc_policy_rr)
@@ -2107,6 +2135,44 @@ void scsi_mq_free_tags(struct kref *kref)
}
/**
+ * scsi_get_internal_cmd() - Allocate an internal SCSI command.
+ * @sdev: SCSI device from which to allocate the command
+ * @data_direction: Data direction for the allocated command
+ * @flags: request allocation flags, e.g. BLK_MQ_REQ_RESERVED or
+ * BLK_MQ_REQ_NOWAIT.
+ *
+ * Allocates a SCSI command for internal LLDD use.
+ *
+ * Return: a pointer to the allocated SCSI command, or NULL if allocation fails.
+ */
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+ enum dma_data_direction data_direction,
+ blk_mq_req_flags_t flags)
+{
+ enum req_op op = data_direction == DMA_TO_DEVICE ? REQ_OP_DRV_OUT :
+ REQ_OP_DRV_IN;
+ struct scsi_cmnd *scmd;
+ struct request *rq;
+
+ rq = scsi_alloc_request(sdev->request_queue, op, flags);
+ if (IS_ERR(rq))
+ return NULL;
+ scmd = blk_mq_rq_to_pdu(rq);
+ scmd->device = sdev;
+
+ return scmd;
+}
+EXPORT_SYMBOL_GPL(scsi_get_internal_cmd);
+
+/**
+ * scsi_put_internal_cmd() - Free an internal SCSI command.
+ * @scmd: SCSI command to be freed
+ */
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd)
+{
+ blk_mq_free_request(blk_mq_rq_from_pdu(scmd));
+}
+EXPORT_SYMBOL_GPL(scsi_put_internal_cmd);
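Taken together with the pseudo SCSI device below, these helpers let an LLD issue driver-private commands through the block layer without managing tags itself. A usage sketch modeled on the scsi_debug abort path above; the function is hypothetical and assumes shost->pseudo_sdev has been populated and nr_reserved_cmds > 0:

	static int example_lld_internal_cmd(struct Scsi_Host *shost)
	{
		struct scsi_cmnd *scmd;
		struct request *rq;
		blk_status_t sts;

		scmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
					     BLK_MQ_REQ_RESERVED);
		if (!scmd)
			return -ENOMEM;

		/* Fill in LLD-private data via scsi_cmd_priv(scmd) here. */

		rq = scsi_cmd_to_rq(scmd);
		rq->timeout = secs_to_jiffies(3);
		sts = blk_execute_rq(rq, true);	/* completes via scsi_complete() */
		scsi_put_internal_cmd(scmd);

		return sts == BLK_STS_OK ? 0 : -EIO;
	}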
+
+/**
* scsi_device_from_queue - return sdev associated with a request_queue
* @q: The request queue to return the sdev from
*
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index b02af340c2d3..3cd0d3074085 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -26,9 +26,9 @@ static void scsi_log_release_buffer(char *bufptr)
kfree(bufptr);
}
-static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+static inline const char *scmd_name(struct scsi_cmnd *scmd)
{
- struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+ const struct request *rq = scsi_cmd_to_rq(scmd);
if (!rq->q || !rq->q->disk)
return NULL;
@@ -80,8 +80,8 @@ void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
}
EXPORT_SYMBOL(sdev_prefix_printk);
-void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
- const char *fmt, ...)
+void scmd_printk(const char *level, struct scsi_cmnd *scmd, const char *fmt,
+ ...)
{
va_list args;
char *logbuf;
@@ -94,7 +94,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
if (!logbuf)
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
+ scsi_cmd_to_rq(scmd)->tag);
if (off < logbuf_len) {
va_start(args, fmt);
off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -371,16 +371,15 @@ void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
EXPORT_SYMBOL(__scsi_print_sense);
/* Normalize and print sense buffer in SCSI command */
-void scsi_print_sense(const struct scsi_cmnd *cmd)
+void scsi_print_sense(struct scsi_cmnd *cmd)
{
scsi_log_print_sense(cmd->device, scmd_name(cmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
- cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+ scsi_cmd_to_rq(cmd)->tag, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_print_sense);
-void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
- int disposition)
+void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition)
{
char *logbuf;
size_t off, logbuf_len;
@@ -393,7 +392,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag);
+ scsi_cmd_to_rq(cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d581613d87c7..2652fecbfe47 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -205,7 +205,6 @@ static int scsi_runtime_idle(struct device *dev)
/* Insert hooks here for targets, hosts, and transport classes */
if (scsi_is_sdev_device(dev)) {
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return -EBUSY;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5b2b19f5e8ec..d07ec15d6c00 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -135,6 +135,7 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *);
/* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3c6e089e80c3..7acbfcfc2172 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -347,6 +347,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
+ scsi_sysfs_device_initialize(sdev);
+
+ if (scsi_device_is_pseudo_dev(sdev))
+ return sdev;
+
depth = sdev->host->cmd_per_lun ?: 1;
/*
@@ -363,8 +368,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
scsi_change_queue_depth(sdev, depth);
- scsi_sysfs_device_initialize(sdev);
-
if (shost->hostt->sdev_init) {
ret = shost->hostt->sdev_init(sdev);
if (ret) {
@@ -1068,6 +1071,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
+ sdev->sdev_bflags = *bflags;
+
+ if (scsi_device_is_pseudo_dev(sdev))
+ return SCSI_SCAN_LUN_PRESENT;
+
/*
 * No need to freeze the queue as it isn't reachable by anyone else yet.
*/
@@ -1113,7 +1121,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->max_queue_depth = sdev->queue_depth;
WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
- sdev->sdev_bflags = *bflags;
/*
* Ok, the device is now all set up, we can
@@ -1212,6 +1219,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev)
goto out;
+ if (scsi_device_is_pseudo_dev(sdev)) {
+ if (bflagsp)
+ *bflagsp = BLIST_NOLUN;
+ return SCSI_SCAN_LUN_PRESENT;
+ }
+
result = kmalloc(result_len, GFP_KERNEL);
if (!result)
goto out_free_sdev;
@@ -2083,12 +2096,65 @@ void scsi_forget_host(struct Scsi_Host *shost)
restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->__devices, siblings) {
- if (sdev->sdev_state == SDEV_DEL)
+ if (scsi_device_is_pseudo_dev(sdev) ||
+ sdev->sdev_state == SDEV_DEL)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_device(sdev);
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * Remove the pseudo device last since it may be needed during removal
+ * of other SCSI devices.
+ */
+ if (shost->pseudo_sdev)
+ __scsi_remove_device(shost->pseudo_sdev);
}
+/**
+ * scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
+ * @shost: Host that needs a pseudo SCSI device
+ *
+ * Lock status: None assumed.
+ *
+ * Returns: The scsi_device or NULL
+ *
+ * Notes:
+ * Attach a single scsi_device to the Scsi_Host. The primary aim for this
+ * device is to serve as a container from which SCSI commands can be
+ * allocated. Each SCSI command will carry a command tag allocated by the
+ * block layer. These SCSI commands can be used by the LLDD to send
+ * internal or passthrough commands without having to manage tag allocation
+ * inside the LLDD.
+ */
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev = NULL;
+ struct scsi_target *starget;
+
+ guard(mutex)(&shost->scan_mutex);
+
+ if (!scsi_host_scan_allowed(shost))
+ goto out;
+
+ starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
+ if (!starget)
+ goto out;
+
+ sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
+ if (!sdev) {
+ scsi_target_reap(starget);
+ goto put_target;
+ }
+
+ sdev->borken = 0;
+
+put_target:
+ /* See also the get_device(dev) call in scsi_alloc_target(). */
+ put_device(&starget->dev);
+
+out:
+ return sdev;
+}
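The call site that saves this pointer in shost->pseudo_sdev is not visible in this hunk; presumably host bring-up does something like the following sketch once scanning is allowed:

	shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
	if (!shost->pseudo_sdev)
		return -ENOMEM;	/* or continue without internal commands */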
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 15ba493d2138..99eb0a30df61 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -605,68 +605,6 @@ sdev_show_##field (struct device *dev, struct device_attribute *attr, \
sdev_show_function(field, format_string) \
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
-
-/*
- * sdev_rw_attr: create a function and attribute variable for a
- * read/write field.
- */
-#define sdev_rw_attr(field, format_string) \
- sdev_show_function(field, format_string) \
- \
-static ssize_t \
-sdev_store_##field (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct scsi_device *sdev; \
- sdev = to_scsi_device(dev); \
- sscanf (buf, format_string, &sdev->field); \
- return count; \
-} \
-static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
-
-/* Currently we don't export bit fields, but we might in future,
- * so leave this code in */
-#if 0
-/*
- * sdev_rd_attr: create a function and attribute variable for a
- * read/write bit field.
- */
-#define sdev_rw_attr_bit(field) \
- sdev_show_function(field, "%d\n") \
- \
-static ssize_t \
-sdev_store_##field (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- int ret; \
- struct scsi_device *sdev; \
- ret = scsi_sdev_check_buf_bit(buf); \
- if (ret >= 0) { \
- sdev = to_scsi_device(dev); \
- sdev->field = ret; \
- ret = count; \
- } \
- return ret; \
-} \
-static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
-
-/*
- * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
- * else return -EINVAL.
- */
-static int scsi_sdev_check_buf_bit(const char *buf)
-{
- if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
- if (buf[0] == '1')
- return 1;
- else if (buf[0] == '0')
- return 0;
- else
- return -EINVAL;
- } else
- return -EINVAL;
-}
-#endif
/*
* Create the actual show/store functions and data structures.
*/
@@ -710,10 +648,14 @@ static ssize_t
sdev_store_timeout (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct scsi_device *sdev;
- int timeout;
- sdev = to_scsi_device(dev);
- sscanf (buf, "%d\n", &timeout);
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int ret, timeout;
+
+ ret = kstrtoint(buf, 0, &timeout);
+ if (ret)
+ return ret;
+ if (timeout <= 0)
+ return -EINVAL;
blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
return count;
}
@@ -1406,6 +1348,9 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
int error;
struct scsi_target *starget = sdev->sdev_target;
+ if (WARN_ON_ONCE(scsi_device_is_pseudo_dev(sdev)))
+ return -EINVAL;
+
error = scsi_target_add(starget);
if (error)
return error;
@@ -1513,7 +1458,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
- if (sdev->host->hostt->sdev_destroy)
+ if (!scsi_device_is_pseudo_dev(sdev) && sdev->host->hostt->sdev_destroy)
sdev->host->hostt->sdev_destroy(sdev);
transport_destroy_device(dev);
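The sdev_store_timeout() rewrite above replaces sscanf(), which silently accepts malformed input, with strict parsing and range checking. The same pattern for any sysfs store, sketched with a hypothetical attribute:

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int val, ret;

	ret = kstrtoint(buf, 0, &val);	/* rejects garbage and overflow */
	if (ret)
		return ret;		/* -EINVAL or -ERANGE */
	if (val <= 0)
		return -EINVAL;		/* enforce a sane range */

	/* ... apply val ... */
	return count;
}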
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3a821afee9bc..987befb02408 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -441,7 +441,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->next_vport_number = 0;
fc_host->npiv_vports_inuse = 0;
- fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no);
+ fc_host->work_q = alloc_workqueue("fc_wq_%d", WQ_PERCPU, 0,
+ shost->host_no);
if (!fc_host->work_q)
return -ENOMEM;
@@ -3088,7 +3089,7 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
- rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", 0, 0,
+ rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", WQ_PERCPU, 0,
shost->host_no, rport->number);
if (!rport->devloss_work_q) {
printk(KERN_ERR "FC Remote Port alloc_workqueue failed\n");
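The alloc_workqueue() changes above spell out WQ_PERCPU where the code previously relied on 0 meaning per-CPU; this appears to be part of the tree-wide move toward unbound-by-default workqueues (an assumption based on the flag, not stated in this diff). Sketch of the two explicit forms:

/* Make the intended concurrency domain explicit at allocation time. */
struct workqueue_struct *percpu_wq, *unbound_wq;

percpu_wq  = alloc_workqueue("ex_percpu", WQ_PERCPU, 0);   /* old default */
unbound_wq = alloc_workqueue("ex_unbound", WQ_UNBOUND, 0); /* scheduler-placed */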
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 743b4c792ceb..ed21c032bbc4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3961,7 +3961,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- queue_work(system_unbound_wq, &session->destroy_work);
+ queue_work(system_dfl_wq, &session->destroy_work);
}
break;
case ISCSI_UEVENT_UNBIND_SESSION:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0252d3f6bed1..f2c0744b4480 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -318,6 +318,35 @@ static ssize_t manage_shutdown_store(struct device *dev,
}
static DEVICE_ATTR_RW(manage_shutdown);
+static ssize_t manage_restart_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_restart);
+}
+
+static ssize_t manage_restart_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_restart = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_restart);
+
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -654,6 +683,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_manage_system_start_stop.attr,
&dev_attr_manage_runtime_start_stop.attr,
&dev_attr_manage_shutdown.attr,
+ &dev_attr_manage_restart.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
@@ -4177,7 +4207,9 @@ static void sd_shutdown(struct device *dev)
(system_state == SYSTEM_POWER_OFF &&
sdkp->device->manage_shutdown) ||
(system_state == SYSTEM_RUNNING &&
- sdkp->device->manage_runtime_start_stop)) {
+ sdkp->device->manage_runtime_start_stop) ||
+ (system_state == SYSTEM_RESTART &&
+ sdkp->device->manage_restart)) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
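With manage_restart added, the visible part of sd_shutdown()'s stop decision can be summarized as a sketch (an additional condition precedes these in the full function, outside this hunk):

/*
 *   SYSTEM_POWER_OFF && manage_shutdown           -> stop disk
 *   SYSTEM_RUNNING   && manage_runtime_start_stop -> stop disk
 *   SYSTEM_RESTART   && manage_restart            -> stop disk (new)
 */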
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index e519df68d603..70c75ab1453a 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -133,6 +133,7 @@ static int sim710_probe_common(struct device *dev, unsigned long base_addr,
out_put_host:
scsi_host_put(host);
out_release:
+ ioport_unmap(hostdata->base);
release_region(base_addr, 64);
out_free:
kfree(hostdata);
@@ -148,6 +149,7 @@ static int sim710_device_remove(struct device *dev)
scsi_remove_host(host);
NCR_700_release(host);
+ ioport_unmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
release_region(host->base, 64);
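Both sim710 fixes pair a previously missing ioport_unmap() with the driver's earlier ioport_map(). The general teardown ordering, as a sketch (that hostdata->base holds the ioport_map() cookie is an assumption from context):

/* probe */
hostdata->base = ioport_map(base_addr, 64);

/* error path / remove: undo in reverse order */
ioport_unmap(hostdata->base);		/* undo ioport_map() */
release_region(base_addr, 64);		/* undo request_region() */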
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 03c97e60d36f..fe549e2b7c94 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -34,11 +34,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.34-035"
+#define DRIVER_VERSION "2.1.36-026"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 34
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 36
+#define DRIVER_REVISION 26
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -5555,14 +5555,25 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
+/*
+ * Adjust the timeout value for physical devices sent to the firmware
+ * by subtracting 3 seconds for timeouts greater than or equal to 8 seconds.
+ *
+ * This provides the firmware with additional time to attempt early recovery
+ * before the OS-level timeout occurs.
+ */
+#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? ((tv) - 3) : (tv))
+
static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group, bool io_high_prio)
{
int rc;
+ u32 timeout;
size_t cdb_length;
struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
+ struct request *rq;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
@@ -5634,6 +5645,12 @@ static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
return SCSI_MLQUEUE_HOST_BUSY;
}
+ if (device->is_physical_device) {
+ rq = scsi_cmd_to_rq(scmd);
+ timeout = rq->timeout / HZ;
+ put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);
+ }
+
pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
return 0;
@@ -6410,10 +6427,22 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev
static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{
+ unsigned long flags;
int rc;
mutex_lock(&ctrl_info->lun_reset_mutex);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "skipping reset of scsi %d:%d:%d:%u, device has been removed\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
@@ -6594,7 +6623,9 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
int mutex_acquired;
+ unsigned int lun;
unsigned long flags;
ctrl_info = shost_to_hba(sdev->host);
@@ -6621,8 +6652,13 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
mutex_unlock(&ctrl_info->scan_mutex);
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ cancel_work_sync(&tmf_work->work_struct);
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
pqi_dev_info(ctrl_info, "removed", device);
pqi_free_device(device);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
@@ -8936,7 +8972,8 @@ static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out;
- host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+ host_memory_descriptor->host_chunk_virt_address =
+ kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
if (!host_memory_descriptor->host_chunk_virt_address)
goto out;
@@ -10110,6 +10147,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4840)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
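Worked numbers for ADJUST_SECS_TIMEOUT_VALUE() as used in pqi_raid_submit_io() above:

/*
 *   rq->timeout = 30 * HZ -> timeout = 30 -> firmware sees 27 seconds
 *   rq->timeout =  8 * HZ -> timeout =  8 -> firmware sees  5 seconds
 *   rq->timeout =  7 * HZ -> timeout =  7 -> unchanged (below the 8s floor)
 */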
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 74a6830b7ed8..168f25e4aaa3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3526,8 +3526,64 @@ static int partition_tape(struct scsi_tape *STp, int size)
out:
return result;
}
-
+/*
+ * Handles any extra state needed for ioctls that are not st-specific.
+ * Called with the scsi_tape lock held; the lock is released before return.
+ */
+static long st_common_ioctl(struct scsi_tape *STp, struct st_modedef *STm,
+ struct file *file, unsigned int cmd_in,
+ unsigned long arg)
+{
+ int i, retval = 0;
+
+ if (!STm->defined) {
+ retval = -ENXIO;
+ goto out;
+ }
+
+ switch (cmd_in) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SCSI_IOCTL_GET_PCI:
+ break;
+ case SG_IO:
+ case SCSI_IOCTL_SEND_COMMAND:
+ case CDROM_SEND_PACKET:
+ if (!capable(CAP_SYS_RAWIO)) {
+ retval = -EPERM;
+ goto out;
+ }
+ fallthrough;
+ default:
+ if ((i = flush_buffer(STp, 0)) < 0) {
+ retval = i;
+ goto out;
+ } else { /* flush_buffer succeeds */
+ if (STp->can_partitions) {
+ i = switch_partition(STp);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ }
+ }
+ }
+ mutex_unlock(&STp->lock);
+
+ retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE,
+ cmd_in, (void __user *)arg);
+ if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
+ /* unload */
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
+ }
+
+ return retval;
+out:
+ mutex_unlock(&STp->lock);
+ return retval;
+}
/* The ioctl command */
static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
@@ -3565,6 +3621,15 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
if (retval)
goto out;
+ switch (cmd_in) {
+ case MTIOCPOS:
+ case MTIOCGET:
+ case MTIOCTOP:
+ break;
+ default:
+ return st_common_ioctl(STp, STm, file, cmd_in, arg);
+ }
+
cmd_type = _IOC_TYPE(cmd_in);
cmd_nr = _IOC_NR(cmd_in);
@@ -3876,29 +3941,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
mt_pos.mt_blkno = blk;
retval = put_user_mtpos(p, &mt_pos);
- goto out;
- }
- mutex_unlock(&STp->lock);
-
- switch (cmd_in) {
- case SG_IO:
- case SCSI_IOCTL_SEND_COMMAND:
- case CDROM_SEND_PACKET:
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- break;
- default:
- break;
}
-
- retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p);
- if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
- /* unload */
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
-
out:
mutex_unlock(&STp->lock);
return retval;
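The net effect of the st_ioctl()/st_common_ioctl() refactor above, as a dispatch sketch derived from the two hunks:

/*
 *   MTIOCTOP / MTIOCGET / MTIOCPOS           -> handled in st_ioctl()
 *   SCSI_IOCTL_GET_IDLUN / _GET_BUS_NUMBER /
 *   _GET_PCI                                 -> scsi_ioctl(), no flush
 *   SG_IO / SCSI_IOCTL_SEND_COMMAND /
 *   CDROM_SEND_PACKET                        -> CAP_SYS_RAWIO check, flush,
 *                                               then scsi_ioctl()
 *   anything else                            -> flush (+ partition switch),
 *                                               then scsi_ioctl()
 */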
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index e6357bc301cb..93c223e0a777 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1844,6 +1844,7 @@ out_release_regions:
out_scsi_host_put:
scsi_host_put(host);
out_disable:
+ unregister_reboot_notifier(&stex_notifier);
pci_disable_device(pdev);
return err;
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 7a73f5e4a1fc..f02e12dfa5f6 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -569,7 +569,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
EXPORT_SYMBOL_GPL(clk_round_rate);
#ifdef CONFIG_PM
-static void clks_core_resume(void)
+static void clks_core_resume(void *data)
{
struct clk *clkp;
@@ -588,13 +588,17 @@ static void clks_core_resume(void)
}
}
-static struct syscore_ops clks_syscore_ops = {
+static const struct syscore_ops clks_syscore_ops = {
.resume = clks_core_resume,
};
+static struct syscore clks_syscore = {
+ .ops = &clks_syscore_ops,
+};
+
static int __init clk_syscore_init(void)
{
- register_syscore_ops(&clks_syscore_ops);
+ register_syscore(&clks_syscore);
return 0;
}
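The sh conversions above follow what looks like a new syscore registration scheme: the callbacks gain a void *data argument, the ops table becomes const, and a struct syscore instance referencing it is registered. A minimal conversion sketch under that assumption:

static int ex_suspend(void *data)	{ /* save state */ return 0; }
static void ex_resume(void *data)	{ /* restore state */ }

static const struct syscore_ops ex_ops = {
	.suspend = ex_suspend,
	.resume  = ex_resume,
};

static struct syscore ex_syscore = {
	.ops = &ex_ops,
};

static int __init ex_init(void)
{
	register_syscore(&ex_syscore);	/* was register_syscore_ops(&ex_ops) */
	return 0;
}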
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index ea571eeb3078..3dde703b7766 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -394,7 +394,7 @@ err0:
return -ENOMEM;
}
-static int intc_suspend(void)
+static int intc_suspend(void *data)
{
struct intc_desc_int *d;
@@ -420,7 +420,7 @@ static int intc_suspend(void)
return 0;
}
-static void intc_resume(void)
+static void intc_resume(void *data)
{
struct intc_desc_int *d;
@@ -450,11 +450,15 @@ static void intc_resume(void)
}
}
-struct syscore_ops intc_syscore_ops = {
+static const struct syscore_ops intc_syscore_ops = {
.suspend = intc_suspend,
.resume = intc_resume,
};
+static struct syscore intc_syscore = {
+ .ops = &intc_syscore_ops,
+};
+
const struct bus_type intc_subsys = {
.name = "intc",
.dev_name = "intc",
@@ -477,7 +481,7 @@ static int __init register_intc_devs(void)
struct intc_desc_int *d;
int error;
- register_syscore_ops(&intc_syscore_ops);
+ register_syscore(&intc_syscore);
error = subsys_system_register(&intc_subsys, NULL);
if (!error) {
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index b6e06c4d2117..79681afea8c6 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -60,12 +60,9 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
return ERR_PTR(-ENODEV);
canvas_pdev = of_find_device_by_node(canvas_node);
- if (!canvas_pdev) {
- of_node_put(canvas_node);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
of_node_put(canvas_node);
+ if (!canvas_pdev)
+ return ERR_PTR(-EPROBE_DEFER);
/*
* If priv is NULL, it's probably because the canvas hasn't
@@ -73,10 +70,9 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
* current state, this driver probe cannot return -EPROBE_DEFER
*/
canvas = dev_get_drvdata(&canvas_pdev->dev);
- if (!canvas) {
- put_device(&canvas_pdev->dev);
+ put_device(&canvas_pdev->dev);
+ if (!canvas)
return ERR_PTR(-EINVAL);
- }
return canvas;
}
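meson_canvas_get() now drops its references as soon as they are no longer needed, on success and failure alike; the same leak-avoiding shape recurs in the ocmem and qcom-pbs fixes below. Sketch of the pattern:

pdev = of_find_device_by_node(node);
of_node_put(node);			/* node ref: dropped on every path */
if (!pdev)
	return ERR_PTR(-EPROBE_DEFER);

data = platform_get_drvdata(pdev);
put_device(&pdev->dev);			/* device ref: dropped on every path */
if (!data)
	return ERR_PTR(-EINVAL);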
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 7549f1644e5e..2a54ca43cd13 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -46,6 +46,9 @@ static const struct meson_gx_soc_id {
{ "A5", 0x3c },
{ "C3", 0x3d },
{ "A4", 0x40 },
+ { "S7", 0x46 },
+ { "S7D", 0x47 },
+ { "S6", 0x48 },
};
static const struct meson_gx_package_id {
@@ -86,6 +89,9 @@ static const struct meson_gx_package_id {
{ "A311D2", 0x36, 0x1, 0xf },
{ "A113X2", 0x3c, 0x1, 0xf },
{ "A113L2", 0x40, 0x1, 0xf },
+ { "S805X3", 0x46, 0x3, 0xf },
+ { "S905X5M", 0x47, 0x1, 0xf },
+ { "S905X5", 0x48, 0x1, 0xf },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
index 8f29108dc69a..5c48455185c9 100644
--- a/drivers/soc/apple/mailbox.c
+++ b/drivers/soc/apple/mailbox.c
@@ -302,11 +302,18 @@ struct apple_mbox *apple_mbox_get(struct device *dev, int index)
return ERR_PTR(-EPROBE_DEFER);
mbox = platform_get_drvdata(pdev);
- if (!mbox)
- return ERR_PTR(-EPROBE_DEFER);
+ if (!mbox) {
+ mbox = ERR_PTR(-EPROBE_DEFER);
+ goto out_put_pdev;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ mbox = ERR_PTR(-ENODEV);
+ goto out_put_pdev;
+ }
- if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER))
- return ERR_PTR(-ENODEV);
+out_put_pdev:
+ put_device(&pdev->dev);
return mbox;
}
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
index 4ff1942b82a7..9eaf3febb382 100644
--- a/drivers/soc/apple/sart.c
+++ b/drivers/soc/apple/sart.c
@@ -214,17 +214,11 @@ static int apple_sart_probe(struct platform_device *pdev)
return 0;
}
-static void apple_sart_put_device(void *dev)
-{
- put_device(dev);
-}
-
struct apple_sart *devm_apple_sart_get(struct device *dev)
{
struct device_node *sart_node;
struct platform_device *sart_pdev;
struct apple_sart *sart;
- int ret;
sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0);
if (!sart_node)
@@ -242,14 +236,11 @@ struct apple_sart *devm_apple_sart_get(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- ret = devm_add_action_or_reset(dev, apple_sart_put_device,
- &sart_pdev->dev);
- if (ret)
- return ERR_PTR(ret);
-
device_link_add(dev, &sart_pdev->dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+ put_device(&sart_pdev->dev);
+
return sart;
}
EXPORT_SYMBOL_GPL(devm_apple_sart_get);
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 364ddbe365c2..bd830649b60d 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -298,7 +298,7 @@ out:
#ifdef CONFIG_PM_SLEEP
static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS];
-static int brcmstb_cpu_credit_reg_suspend(void)
+static int brcmstb_cpu_credit_reg_suspend(void *data)
{
unsigned int i;
@@ -311,7 +311,7 @@ static int brcmstb_cpu_credit_reg_suspend(void)
return 0;
}
-static void brcmstb_cpu_credit_reg_resume(void)
+static void brcmstb_cpu_credit_reg_resume(void *data)
{
unsigned int i;
@@ -322,10 +322,14 @@ static void brcmstb_cpu_credit_reg_resume(void)
cbc_writel(cpubiuctrl_reg_save[i], i);
}
-static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
+static const struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
.suspend = brcmstb_cpu_credit_reg_suspend,
.resume = brcmstb_cpu_credit_reg_resume,
};
+
+static struct syscore brcmstb_cpu_credit_syscore = {
+ .ops = &brcmstb_cpu_credit_syscore_ops,
+};
#endif
@@ -354,7 +358,7 @@ static int __init brcmstb_biuctrl_init(void)
a72_b53_rac_enable_all(np);
mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
- register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
+ register_syscore(&brcmstb_cpu_credit_syscore);
#endif
ret = 0;
out_put:
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 9be240999f87..6b392b3ad4b1 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1073,7 +1073,7 @@ EXPORT_SYMBOL(qman_portal_set_iperiod);
int qman_wq_alloc(void)
{
- qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", WQ_PERCPU, 1);
if (!qm_portal_wq)
return -ENOMEM;
return 0;
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index 6f7597950aa3..6009e8b32c44 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -219,7 +219,7 @@ static int allocate_frame_data(void)
pcfg = qman_get_qm_portal_config(qman_dma_portal);
- __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+ __frame_ptr = kmalloc_array(4, HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
return -ENOMEM;
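kmalloc_array() is the overflow-checked form of a multiplied kmalloc(): if n * size would wrap, it returns NULL instead of allocating a short buffer. Generic sketch:

ptr = kmalloc_array(n, sizeof(*ptr), GFP_KERNEL);	/* checked multiply */
if (!ptr)
	return -ENOMEM;
/* vs. kmalloc(n * sizeof(*ptr), GFP_KERNEL): silent wrap on overflow */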
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
index c697a0398d91..978c43e9115a 100644
--- a/drivers/soc/mediatek/mtk-socinfo.c
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -50,6 +50,8 @@ static struct socinfo_data socinfo_data_table[] = {
MTK_SOCINFO_ENTRY("MT8186T", "MT8186TV/AZA", "Kompanio 528", 0x81862001, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 838", 0x81880000, 0x00000010),
MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 838", 0x81880000, 0x00000011),
+ MTK_SOCINFO_ENTRY("MT8189", "MT8189GV/AZA", "Kompanio 540", 0x81890000, 0x00000020),
+ MTK_SOCINFO_ENTRY("MT8189", "MT8189HV/AZA", "Kompanio 540", 0x81890000, 0x00000021),
MTK_SOCINFO_ENTRY("MT8192", "MT8192V/AZA", "Kompanio 820", 0x00001100, 0x00040080),
MTK_SOCINFO_ENTRY("MT8192T", "MT8192V/ATZA", "Kompanio 828", 0x00000100, 0x000400C0),
MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EZA", "Kompanio 1200", 0x81950300, CELL_NOT_USED),
@@ -58,6 +60,7 @@ static struct socinfo_data socinfo_data_table[] = {
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081),
MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080),
+ MTK_SOCINFO_ENTRY("MT8391", "MT8391AV/AZA", "Genio 720", 0x83910000, 0x00000080),
MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950800, CELL_NOT_USED),
};
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index 19f4b576f822..bcf554602561 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -9,3 +9,15 @@ config POLARFIRE_SOC_SYS_CTRL
module will be called mpfs_system_controller.
If unsure, say N.
+
+config POLARFIRE_SOC_SYSCONS
+ bool "PolarFire SoC (MPFS) syscon drivers"
+ default y
+ depends on ARCH_MICROCHIP
+ select MFD_CORE
+ help
+ These drivers add support for the syscons on PolarFire SoC (MPFS).
+ Without these drivers, core parts of the kernel such as clocks
+ and resets will not function correctly.
+
+ If unsure, and on a PolarFire SoC, say Y.
diff --git a/drivers/soc/microchip/Makefile b/drivers/soc/microchip/Makefile
index 14489919fe4b..1a3a1594b089 100644
--- a/drivers/soc/microchip/Makefile
+++ b/drivers/soc/microchip/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_POLARFIRE_SOC_SYS_CTRL) += mpfs-sys-controller.o
+obj-$(CONFIG_POLARFIRE_SOC_SYSCONS) += mpfs-control-scb.o mpfs-mss-top-sysreg.o
diff --git a/drivers/soc/microchip/mpfs-control-scb.c b/drivers/soc/microchip/mpfs-control-scb.c
new file mode 100644
index 000000000000..f0b84b1f49cb
--- /dev/null
+++ b/drivers/soc/microchip/mpfs-control-scb.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+
+static const struct mfd_cell mpfs_control_scb_devs[] = {
+ MFD_CELL_NAME("mpfs-tvs"),
+};
+
+static int mpfs_control_scb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return mfd_add_devices(dev, PLATFORM_DEVID_NONE, mpfs_control_scb_devs,
+ ARRAY_SIZE(mpfs_control_scb_devs), NULL, 0, NULL);
+}
+
+static const struct of_device_id mpfs_control_scb_of_match[] = {
+ { .compatible = "microchip,mpfs-control-scb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_control_scb_of_match);
+
+static struct platform_driver mpfs_control_scb_driver = {
+ .driver = {
+ .name = "mpfs-control-scb",
+ .of_match_table = mpfs_control_scb_of_match,
+ },
+ .probe = mpfs_control_scb_probe,
+};
+module_platform_driver(mpfs_control_scb_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC control scb driver");
diff --git a/drivers/soc/microchip/mpfs-mss-top-sysreg.c b/drivers/soc/microchip/mpfs-mss-top-sysreg.c
new file mode 100644
index 000000000000..b2244e44ff0f
--- /dev/null
+++ b/drivers/soc/microchip/mpfs-mss-top-sysreg.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+static const struct mfd_cell mpfs_mss_top_sysreg_devs[] = {
+ MFD_CELL_NAME("mpfs-reset"),
+};
+
+static int mpfs_mss_top_sysreg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = mfd_add_devices(dev, PLATFORM_DEVID_NONE, mpfs_mss_top_sysreg_devs,
+ ARRAY_SIZE(mpfs_mss_top_sysreg_devs), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id mpfs_mss_top_sysreg_of_match[] = {
+ { .compatible = "microchip,mpfs-mss-top-sysreg", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_mss_top_sysreg_of_match);
+
+static struct platform_driver mpfs_mss_top_sysreg_driver = {
+ .driver = {
+ .name = "mpfs-mss-top-sysreg",
+ .of_match_table = mpfs_mss_top_sysreg_of_match,
+ },
+ .probe = mpfs_mss_top_sysreg_probe,
+};
+module_platform_driver(mpfs_mss_top_sysreg_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC mss top sysreg driver");
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index c467b55b4174..b203bc685cad 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -22,7 +22,18 @@
#include <soc/qcom/ice.h>
#define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */
-#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE 100 /* assuming HWKM v2 */
+
+#define QCOM_ICE_HWKM_V1 1 /* HWKM version 1 */
+#define QCOM_ICE_HWKM_V2 2 /* HWKM version 2 */
+
+#define QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE 100 /* Maximum HWKM wrapped key size */
+
+/*
+ * The wrapped key size depends on the HWKM version:
+ * HWKM version 1 supports 68 bytes,
+ * HWKM version 2 supports 100 bytes.
+ */
+#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(v) ((v) == QCOM_ICE_HWKM_V1 ? 68 : 100)
/* QCOM ICE registers */
@@ -62,13 +73,15 @@ union crypto_cfg {
#define QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000)
#define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2))
+/* In HWKM v1 the ICE legacy mode is controlled from HWKM register space */
+#define QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED BIT(5)
#define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004)
#define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0)
#define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1)
#define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2)
-#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 BIT(7)
-#define QCOM_ICE_HWKM_BIST_DONE_V2 BIT(9)
+#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(14) : BIT(7))
+#define QCOM_ICE_HWKM_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(16) : BIT(9))
#define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008)
#define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3)
@@ -97,6 +110,7 @@ struct qcom_ice {
struct clk *core_clk;
bool use_hwkm;
bool hwkm_init_complete;
+ u8 hwkm_version;
};
static bool qcom_ice_check_supported(struct qcom_ice *ice)
@@ -114,9 +128,24 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice)
return false;
}
+ /* HWKM version v2 is present from ICE 3.2.1 onwards, while version v1
+ * is present only in ICE 3.2.0. Earlier ICE versions don't have HWKM.
+ */
+ if (major > 3 ||
+ (major == 3 && (minor >= 3 || (minor == 2 && step >= 1))))
+ ice->hwkm_version = QCOM_ICE_HWKM_V2;
+ else if ((major == 3) && (minor == 2))
+ ice->hwkm_version = QCOM_ICE_HWKM_V1;
+ else
+ ice->hwkm_version = 0;
+
dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
major, minor, step);
+ if (ice->hwkm_version)
+ dev_info(dev, "QC Hardware Key Manager (HWKM) version v%d\n",
+ ice->hwkm_version);
+
/* If fuses are blown, ICE might not work in the standard way. */
regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING);
if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
@@ -131,19 +160,18 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice)
* v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE
* versions don't have HWKM at all. However, for HWKM to be fully
* usable by Linux, the TrustZone software also needs to support certain
- * SCM calls including the ones to generate and prepare keys. That
- * effectively makes the earliest supported SoC be SM8650, which has
- * HWKM v2. Therefore, this driver doesn't include support for HWKM v1,
- * and it checks for the SCM call support before it decides to use HWKM.
+ * SCM calls, including the ones to generate and prepare keys. Support
+ * for these SCM calls is present on SoCs with HWKM v2 and is being
+ * added for SoCs with HWKM v1 as well, but not every HWKM v1 SoC
+ * supports them yet. So this driver checks for SCM call support
+ * before it decides to use HWKM.
*
* Also, since HWKM and legacy mode are mutually exclusive, and
* ICE-capable storage driver(s) need to know early on whether to
* advertise support for raw keys or wrapped keys, HWKM cannot be used
* unconditionally. A module parameter is used to opt into using it.
*/
- if ((major >= 4 ||
- (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) &&
- qcom_scm_has_wrapped_key_support()) {
+ if (ice->hwkm_version && qcom_scm_has_wrapped_key_support()) {
if (qcom_ice_use_wrapped_keys) {
dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n");
ice->use_hwkm = true;
@@ -212,8 +240,8 @@ static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
(QCOM_ICE_HWKM_KT_CLEAR_DONE |
QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE |
QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE |
- QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 |
- QCOM_ICE_HWKM_BIST_DONE_V2)) {
+ QCOM_ICE_HWKM_CRYPTO_BIST_DONE(ice->hwkm_version) |
+ QCOM_ICE_HWKM_BIST_DONE(ice->hwkm_version))) {
dev_err(ice->dev, "HWKM self-test error!\n");
/*
* Too late to revoke use_hwkm here, as it was already
@@ -230,7 +258,7 @@ static void qcom_ice_hwkm_init(struct qcom_ice *ice)
if (!ice->use_hwkm)
return;
- BUILD_BUG_ON(QCOM_ICE_HWKM_WRAPPED_KEY_SIZE >
+ BUILD_BUG_ON(QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE >
BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE);
/*
* When ICE is in HWKM mode, it only supports wrapped keys.
@@ -238,9 +266,15 @@ static void qcom_ice_hwkm_init(struct qcom_ice *ice)
*
* Put ICE in HWKM mode. ICE defaults to legacy mode.
*/
- regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL);
- regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED;
- qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL);
+ if (ice->hwkm_version == QCOM_ICE_HWKM_V2) {
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL);
+ regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL);
+ } else if (ice->hwkm_version == QCOM_ICE_HWKM_V1) {
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_CTL);
+ regval &= ~QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_HWKM_TZ_KM_CTL);
+ }
/* Disable CRC checks. This HWKM feature is not used. */
qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL,
@@ -298,7 +332,7 @@ EXPORT_SYMBOL_GPL(qcom_ice_suspend);
static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot)
{
- return slot * 2;
+ return ice->hwkm_version == QCOM_ICE_HWKM_V1 ? slot : slot * 2;
}
static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot,
@@ -451,11 +485,12 @@ int qcom_ice_generate_key(struct qcom_ice *ice,
{
int err;
- err = qcom_scm_generate_ice_key(lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ err = qcom_scm_generate_ice_key(lt_key,
+ QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version));
if (err)
return err;
- return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version);
}
EXPORT_SYMBOL_GPL(qcom_ice_generate_key);
@@ -478,13 +513,13 @@ int qcom_ice_prepare_key(struct qcom_ice *ice,
int err;
err = qcom_scm_prepare_ice_key(lt_key, lt_key_size,
- eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version));
if (err == -EIO || err == -EINVAL)
err = -EBADMSG; /* probably invalid key */
if (err)
return err;
- return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version);
}
EXPORT_SYMBOL_GPL(qcom_ice_prepare_key);
@@ -506,11 +541,11 @@ int qcom_ice_import_key(struct qcom_ice *ice,
int err;
err = qcom_scm_import_ice_key(raw_key, raw_key_size,
- lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version));
if (err)
return err;
- return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version);
}
EXPORT_SYMBOL_GPL(qcom_ice_import_key);
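The version-dependent helpers introduced above resolve to the following concrete values, all taken from this diff:

/*
 * QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(QCOM_ICE_HWKM_V1) ==  68 bytes
 * QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(QCOM_ICE_HWKM_V2) == 100 bytes
 * QCOM_ICE_HWKM_CRYPTO_BIST_DONE: BIT(14) on v1, BIT(7) on v2
 * QCOM_ICE_HWKM_BIST_DONE:        BIT(16) on v1, BIT(9) on v2
 * translate_hwkm_slot():          slot on v1, slot * 2 on v2
 */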
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 857ead56b37d..13e174267294 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -214,6 +214,364 @@ static const struct llcc_slice_config ipq5424_data[] = {
},
};
+static const struct llcc_slice_config kaanapali_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 35,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 5,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 34,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 5632,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .write_scid_cacheable_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 7168,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 256,
+ .priority = 5,
+ .bonus_ways = 0xfffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 800,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ .mru_uncap_en = true,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 19,
+ .max_cap = 512,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CPUMTE,
+ .slice_id = 7,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 15,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .mru_uncap_en = true,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 7936,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0x7fffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_VIDDEC,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMOFE,
+ .slice_id = 33,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTIP,
+ .slice_id = 13,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTRF,
+ .slice_id = 10,
+ .max_cap = 3584,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTRF,
+ .slice_id = 21,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_VIDEO_APV,
+ .slice_id = 6,
+ .max_cap = 768,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_COMPUTE1,
+ .slice_id = 22,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CPUSS_OPP,
+ .slice_id = 32,
+ .max_cap = 0,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CPUSSMPAM,
+ .slice_id = 17,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CAM_IPE_STROV,
+ .slice_id = 14,
+ .max_cap = 400,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAM_OFE_STROV,
+ .slice_id = 20,
+ .max_cap = 400,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CPUSS_HEU,
+ .slice_id = 28,
+ .max_cap = 0,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDM_PNG_FIXED,
+ .slice_id = 26,
+ .max_cap = 256,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xff000000,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ },
+};
+
static const struct llcc_slice_config sa8775p_data[] = {
{
.usecase_id = LLCC_CPUSS,
@@ -3505,6 +3863,15 @@ static const u32 llcc_v6_reg_offset[] = {
[LLCC_TRP_WRS_CACHEABLE_EN] = 0x00042088,
};
+static const struct qcom_llcc_config kaanapali_cfg[] = {
+ {
+ .sct_data = kaanapali_data,
+ .size = ARRAY_SIZE(kaanapali_data),
+ .reg_offset = llcc_v6_reg_offset,
+ .edac_reg_offset = &llcc_v6_edac_reg_offset,
+ },
+};
+
static const struct qcom_llcc_config qcs615_cfg[] = {
{
.sct_data = qcs615_data,
@@ -3731,6 +4098,11 @@ static const struct qcom_llcc_config x1e80100_cfg[] = {
},
};
+static const struct qcom_sct_config kaanapali_cfgs = {
+ .llcc_config = kaanapali_cfg,
+ .num_config = ARRAY_SIZE(kaanapali_cfg),
+};
+
static const struct qcom_sct_config qcs615_cfgs = {
.llcc_config = qcs615_cfg,
.num_config = ARRAY_SIZE(qcs615_cfg),
@@ -4570,6 +4942,7 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,ipq5424-llcc", .data = &ipq5424_cfgs},
+ { .compatible = "qcom,kaanapali-llcc", .data = &kaanapali_cfgs},
{ .compatible = "qcom,qcs615-llcc", .data = &qcs615_cfgs},
{ .compatible = "qcom,qcs8300-llcc", .data = &qcs8300_cfgs},
{ .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index a5c80d4fcc36..c239107cb930 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -332,10 +332,22 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw)
return false;
}
-static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *fw_name, void *mem_region,
- phys_addr_t mem_phys, size_t mem_size,
- phys_addr_t *reloc_base)
+/**
+ * qcom_mdt_load_no_init() - load the firmware whose header is loaded as fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
@@ -435,12 +447,13 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
return ret;
}
+EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
/**
 * qcom_mdt_load() - load the firmware whose header is loaded as fw
* @dev: device handle to associate resources with
* @fw: firmware object for the mdt file
- * @firmware: name of the firmware, for construction of segment file names
+ * @fw_name: name of the firmware, for construction of segment file names
* @pas_id: PAS identifier
* @mem_region: allocated memory region to load firmware into
* @mem_phys: physical address of allocated memory region
@@ -450,41 +463,20 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
* Returns 0 on success, negative errno otherwise.
*/
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *firmware, int pas_id, void *mem_region,
+ const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base)
{
int ret;
- ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL);
+ ret = qcom_mdt_pas_init(dev, fw, fw_name, pas_id, mem_phys, NULL);
if (ret)
return ret;
- return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys,
- mem_size, reloc_base);
+ return qcom_mdt_load_no_init(dev, fw, fw_name, mem_region, mem_phys,
+ mem_size, reloc_base);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load);
-/**
- * qcom_mdt_load_no_init() - load the firmware which header is loaded as fw
- * @dev: device handle to associate resources with
- * @fw: firmware object for the mdt file
- * @firmware: name of the firmware, for construction of segment file names
- * @mem_region: allocated memory region to load firmware into
- * @mem_phys: physical address of allocated memory region
- * @mem_size: size of the allocated memory region
- * @reloc_base: adjusted physical address after relocation
- *
- * Returns 0 on success, negative errno otherwise.
- */
-int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
- const char *firmware, void *mem_region, phys_addr_t mem_phys,
- size_t mem_size, phys_addr_t *reloc_base)
-{
- return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys,
- mem_size, reloc_base);
-}
-EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
-
MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
MODULE_LICENSE("GPL v2");
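With qcom_mdt_load_no_init() now exported, a caller that performs PAS initialization elsewhere (or not at all) can load segments directly. Hypothetical caller sketch; the firmware name and memory parameters are placeholders:

const struct firmware *fw;
phys_addr_t reloc_base;
int ret;

ret = request_firmware(&fw, "example.mdt", dev);
if (ret)
	return ret;

ret = qcom_mdt_load_no_init(dev, fw, "example.mdt", mem_region,
			    mem_phys, mem_size, &reloc_base);
release_firmware(fw);
return ret;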
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index 9c3bd37b6579..71130a2f62e9 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -202,9 +202,9 @@ struct ocmem *of_get_ocmem(struct device *dev)
}
ocmem = platform_get_drvdata(pdev);
+ put_device(&pdev->dev);
if (!ocmem) {
dev_err(dev, "Cannot get ocmem\n");
- put_device(&pdev->dev);
return ERR_PTR(-ENODEV);
}
return ocmem;
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index c0a4be5df926..627f96ca322e 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -39,6 +39,7 @@ struct pmic_glink {
struct mutex state_lock;
unsigned int client_state;
unsigned int pdr_state;
+ bool pdr_available;
/* serializing clients list updates */
spinlock_t client_lock;
@@ -246,9 +247,12 @@ static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
return dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
dev_set_drvdata(&rpdev->dev, pg);
+ pg->pdr_available = rpdev->id.driver_data;
guard(mutex)(&pg->state_lock);
pg->ept = rpdev->ept;
+ if (!pg->pdr_available)
+ pg->pdr_state = SERVREG_SERVICE_STATE_UP;
pmic_glink_state_notify_clients(pg);
return 0;
@@ -265,11 +269,14 @@ static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
guard(mutex)(&pg->state_lock);
pg->ept = NULL;
+ if (!pg->pdr_available)
+ pg->pdr_state = SERVREG_SERVICE_STATE_DOWN;
pmic_glink_state_notify_clients(pg);
}
static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
- { "PMIC_RTR_ADSP_APPS" },
+ {.name = "PMIC_RTR_ADSP_APPS", .driver_data = true },
+ {.name = "PMIC_RTR_SOCCP_APPS", .driver_data = false },
{}
};
diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c
index 1cc5d045f9dd..06b4a596e275 100644
--- a/drivers/soc/qcom/qcom-pbs.c
+++ b/drivers/soc/qcom/qcom-pbs.c
@@ -173,6 +173,8 @@ struct pbs_dev *get_pbs_client_device(struct device *dev)
return ERR_PTR(-EINVAL);
}
+ platform_device_put(pdev);
+
return pbs;
}
EXPORT_SYMBOL_GPL(get_pbs_client_device);
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 8f1158e0c631..a25d1de592f0 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -212,13 +212,6 @@ static int gsbi_probe(struct platform_device *pdev)
return of_platform_populate(node, NULL, NULL, &pdev->dev);
}
-static void gsbi_remove(struct platform_device *pdev)
-{
- struct gsbi_info *gsbi = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(gsbi->hclk);
-}
-
static const struct of_device_id gsbi_dt_match[] = {
{ .compatible = "qcom,gsbi-v1.0.0", },
{ },
@@ -232,7 +225,6 @@ static struct platform_driver gsbi_driver = {
.of_match_table = gsbi_dt_match,
},
.probe = gsbi_probe,
- .remove = gsbi_remove,
};
module_platform_driver(gsbi_driver);
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 6384f271953d..1bcbe69688d2 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -360,6 +360,15 @@ static const struct qcom_pdm_domain_data mpss_wlan_pd = {
},
};
+static const struct qcom_pdm_domain_data *kaanapali_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ NULL,
+};
+
static const struct qcom_pdm_domain_data *msm8996_domains[] = {
&msm8996_adsp_audio_pd,
&msm8996_adsp_root_pd,
@@ -552,6 +561,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,apq8074", .data = NULL, },
{ .compatible = "qcom,apq8084", .data = NULL, },
{ .compatible = "qcom,apq8096", .data = msm8996_domains, },
+ { .compatible = "qcom,kaanapali", .data = kaanapali_domains, },
{ .compatible = "qcom,msm8226", .data = NULL, },
{ .compatible = "qcom,msm8909", .data = NULL, },
{ .compatible = "qcom,msm8916", .data = NULL, },
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index c4c45f15dca4..fef840b54574 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -353,8 +353,12 @@ static void *cached_entry_to_item(struct smem_private_entry *e)
return p - le32_to_cpu(e->size);
}
-/* Pointer to the one and only smem handle */
-static struct qcom_smem *__smem;
+/*
+ * Pointer to the one and only smem handle.
+ * Initialized to -EPROBE_DEFER to signal that SMEM has not been probed yet.
+ * Can be set to -ENODEV if SMEM was not initialized by SBL.
+ */
+static struct qcom_smem *__smem = INIT_ERR_PTR(-EPROBE_DEFER);
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
@@ -508,8 +512,8 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
unsigned long flags;
int ret;
- if (!__smem)
- return -EPROBE_DEFER;
+ if (IS_ERR(__smem))
+ return PTR_ERR(__smem);
if (item < SMEM_ITEM_LAST_FIXED) {
dev_err(__smem->dev,
@@ -517,7 +521,7 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
return -EINVAL;
}
- if (WARN_ON(item >= __smem->item_count))
+ if (item >= __smem->item_count)
return -EINVAL;
ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
@@ -685,12 +689,12 @@ invalid_canary:
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
struct smem_partition *part;
- void *ptr = ERR_PTR(-EPROBE_DEFER);
+ void *ptr;
- if (!__smem)
- return ptr;
+ if (IS_ERR(__smem))
+ return __smem;
- if (WARN_ON(item >= __smem->item_count))
+ if (item >= __smem->item_count)
return ERR_PTR(-EINVAL);
if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
@@ -723,8 +727,8 @@ int qcom_smem_get_free_space(unsigned host)
struct smem_header *header;
unsigned ret;
- if (!__smem)
- return -EPROBE_DEFER;
+ if (IS_ERR(__smem))
+ return PTR_ERR(__smem);
if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
part = &__smem->partitions[host];
@@ -1181,8 +1185,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
header = smem->regions[0].virt_base;
if (le32_to_cpu(header->initialized) != 1 ||
le32_to_cpu(header->reserved)) {
- dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
- return -EINVAL;
+ __smem = ERR_PTR(-ENODEV);
+ return dev_err_probe(&pdev->dev, PTR_ERR(__smem), "SMEM is not initialized by SBL\n");
}
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
@@ -1190,7 +1194,7 @@ static int qcom_smem_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, hwlock_id,
"failed to retrieve hwlock\n");
- smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ smem->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, hwlock_id);
if (!smem->hwlock)
return -ENXIO;
@@ -1243,7 +1247,6 @@ static void qcom_smem_remove(struct platform_device *pdev)
{
platform_device_unregister(__smem->socinfo);
- hwspin_lock_free(__smem->hwlock);
__smem = NULL;
}
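Because __smem now starts out as an ERR_PTR sentinel, callers see the precise failure reason instead of a blanket -EPROBE_DEFER. Caller-side sketch:

void *p = qcom_smem_get(QCOM_SMEM_HOST_ANY, item, &size);

if (IS_ERR(p))
	return PTR_ERR(p);	/* -EPROBE_DEFER: not probed yet;
				 * -ENODEV: SBL never initialized SMEM */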
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 963772f45489..003a2304d535 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -37,7 +37,13 @@
*/
#define SMEM_IMAGE_TABLE_BOOT_INDEX 0
#define SMEM_IMAGE_TABLE_TZ_INDEX 1
+#define SMEM_IMAGE_TABLE_TZSECAPP_INDEX 2
#define SMEM_IMAGE_TABLE_RPM_INDEX 3
+#define SMEM_IMAGE_TABLE_SDI_INDEX 4
+#define SMEM_IMAGE_TABLE_HYP_INDEX 5
+#define SMEM_IMAGE_TABLE_ADSP1_INDEX 6
+#define SMEM_IMAGE_TABLE_ADSP2_INDEX 7
+#define SMEM_IMAGE_TABLE_CDSP2_INDEX 8
#define SMEM_IMAGE_TABLE_APPSBL_INDEX 9
#define SMEM_IMAGE_TABLE_APPS_INDEX 10
#define SMEM_IMAGE_TABLE_MPSS_INDEX 11
@@ -46,31 +52,77 @@
#define SMEM_IMAGE_TABLE_VIDEO_INDEX 14
#define SMEM_IMAGE_TABLE_DSPS_INDEX 15
#define SMEM_IMAGE_TABLE_CDSP_INDEX 16
+#define SMEM_IMAGE_TABLE_NPU_INDEX 17
+#define SMEM_IMAGE_TABLE_WPSS_INDEX 18
#define SMEM_IMAGE_TABLE_CDSP1_INDEX 19
#define SMEM_IMAGE_TABLE_GPDSP_INDEX 20
#define SMEM_IMAGE_TABLE_GPDSP1_INDEX 21
+#define SMEM_IMAGE_TABLE_SENSORPD_INDEX 22
+#define SMEM_IMAGE_TABLE_AUDIOPD_INDEX 23
+#define SMEM_IMAGE_TABLE_OEMPD_INDEX 24
+#define SMEM_IMAGE_TABLE_CHARGERPD_INDEX 25
+#define SMEM_IMAGE_TABLE_OISPD_INDEX 26
+#define SMEM_IMAGE_TABLE_SOCCP_INDEX 27
#define SMEM_IMAGE_TABLE_TME_INDEX 28
+#define SMEM_IMAGE_TABLE_GEARVM_INDEX 29
+#define SMEM_IMAGE_TABLE_UEFI_INDEX 30
+#define SMEM_IMAGE_TABLE_CDSP3_INDEX 31
+#define SMEM_IMAGE_TABLE_AUDIOPD_ADSP1_INDEX 32
+#define SMEM_IMAGE_TABLE_AUDIOPD_ADSP2_INDEX 33
+#define SMEM_IMAGE_TABLE_DCP_INDEX 34
+#define SMEM_IMAGE_TABLE_OOBS_INDEX 35
+#define SMEM_IMAGE_TABLE_OOBNS_INDEX 36
+#define SMEM_IMAGE_TABLE_DEVCFG_INDEX 37
+#define SMEM_IMAGE_TABLE_BTPD_INDEX 38
+#define SMEM_IMAGE_TABLE_QECP_INDEX 39
+
#define SMEM_IMAGE_VERSION_TABLE 469
+#define SMEM_IMAGE_VERSION_TABLE_2 667
/*
* SMEM Image table names
*/
static const char *const socinfo_image_names[] = {
+ [SMEM_IMAGE_TABLE_ADSP1_INDEX] = "adsp1",
+ [SMEM_IMAGE_TABLE_ADSP2_INDEX] = "adsp2",
[SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp",
[SMEM_IMAGE_TABLE_APPSBL_INDEX] = "appsbl",
[SMEM_IMAGE_TABLE_APPS_INDEX] = "apps",
+ [SMEM_IMAGE_TABLE_AUDIOPD_INDEX] = "audiopd",
+ [SMEM_IMAGE_TABLE_AUDIOPD_ADSP1_INDEX] = "audiopd_adsp1",
+ [SMEM_IMAGE_TABLE_AUDIOPD_ADSP2_INDEX] = "audiopd_adsp2",
[SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot",
+ [SMEM_IMAGE_TABLE_BTPD_INDEX] = "btpd",
+ [SMEM_IMAGE_TABLE_CDSP1_INDEX] = "cdsp1",
+ [SMEM_IMAGE_TABLE_CDSP2_INDEX] = "cdsp2",
+ [SMEM_IMAGE_TABLE_CDSP3_INDEX] = "cdsp3",
+ [SMEM_IMAGE_TABLE_CDSP_INDEX] = "cdsp",
+ [SMEM_IMAGE_TABLE_CHARGERPD_INDEX] = "chargerpd",
[SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss",
+ [SMEM_IMAGE_TABLE_DCP_INDEX] = "dcp",
+ [SMEM_IMAGE_TABLE_DEVCFG_INDEX] = "devcfg",
+ [SMEM_IMAGE_TABLE_DSPS_INDEX] = "dsps",
+ [SMEM_IMAGE_TABLE_GEARVM_INDEX] = "gearvm",
+ [SMEM_IMAGE_TABLE_GPDSP1_INDEX] = "gpdsp1",
+ [SMEM_IMAGE_TABLE_GPDSP_INDEX] = "gpdsp",
+ [SMEM_IMAGE_TABLE_HYP_INDEX] = "hyp",
[SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss",
+ [SMEM_IMAGE_TABLE_NPU_INDEX] = "npu",
+ [SMEM_IMAGE_TABLE_OEMPD_INDEX] = "oempd",
+ [SMEM_IMAGE_TABLE_OISPD_INDEX] = "oispd",
+ [SMEM_IMAGE_TABLE_OOBNS_INDEX] = "oobns",
+ [SMEM_IMAGE_TABLE_OOBS_INDEX] = "oobs",
+ [SMEM_IMAGE_TABLE_QECP_INDEX] = "qecp",
[SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm",
+ [SMEM_IMAGE_TABLE_SDI_INDEX] = "sdi",
+ [SMEM_IMAGE_TABLE_SENSORPD_INDEX] = "sensorpd",
+ [SMEM_IMAGE_TABLE_SOCCP_INDEX] = "soccp",
+ [SMEM_IMAGE_TABLE_TME_INDEX] = "tme",
[SMEM_IMAGE_TABLE_TZ_INDEX] = "tz",
+ [SMEM_IMAGE_TABLE_TZSECAPP_INDEX] = "tzsecapp",
+ [SMEM_IMAGE_TABLE_UEFI_INDEX] = "uefi",
[SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video",
- [SMEM_IMAGE_TABLE_DSPS_INDEX] = "dsps",
- [SMEM_IMAGE_TABLE_CDSP_INDEX] = "cdsp",
- [SMEM_IMAGE_TABLE_CDSP1_INDEX] = "cdsp1",
- [SMEM_IMAGE_TABLE_GPDSP_INDEX] = "gpdsp",
- [SMEM_IMAGE_TABLE_GPDSP1_INDEX] = "gpdsp1",
- [SMEM_IMAGE_TABLE_TME_INDEX] = "tme",
+ [SMEM_IMAGE_TABLE_WPSS_INDEX] = "wpss",
};
static const char *const pmic_models[] = {
@@ -161,6 +213,7 @@ struct socinfo_params {
u32 num_func_clusters;
u32 boot_cluster;
u32 boot_core;
+ u32 raw_package_type;
};
struct smem_image_version {
@@ -415,6 +468,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SC7280) },
{ qcom_board_id(SC7180P) },
{ qcom_board_id(QCM6490) },
+ { qcom_board_id(QCS6490) },
{ qcom_board_id(SM7325P) },
{ qcom_board_id(IPQ5000) },
{ qcom_board_id(IPQ0509) },
@@ -461,6 +515,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ5424) },
{ qcom_board_id(QCM6690) },
{ qcom_board_id(QCS6690) },
+ { qcom_board_id(SM8850) },
{ qcom_board_id(IPQ5404) },
{ qcom_board_id(QCS9100) },
{ qcom_board_id(QCS8300) },
@@ -609,7 +664,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
struct smem_image_version *versions;
struct dentry *dentry;
size_t size;
- int i;
+ int i, j;
unsigned int num_pmics;
unsigned int pmic_array_offset;
@@ -621,6 +676,14 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
&qcom_socinfo->info.fmt);
switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 23):
+ case SOCINFO_VERSION(0, 22):
+ case SOCINFO_VERSION(0, 21):
+ case SOCINFO_VERSION(0, 20):
+ qcom_socinfo->info.raw_package_type = __le32_to_cpu(info->raw_package_type);
+ debugfs_create_u32("raw_package_type", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_package_type);
+ fallthrough;
case SOCINFO_VERSION(0, 19):
qcom_socinfo->info.num_func_clusters = __le32_to_cpu(info->num_func_clusters);
qcom_socinfo->info.boot_cluster = __le32_to_cpu(info->boot_cluster);
@@ -753,20 +816,31 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
break;
}
- versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE,
- &size);
-
- for (i = 0; i < ARRAY_SIZE(socinfo_image_names); i++) {
+ for (i = 0, j = 0; i < ARRAY_SIZE(socinfo_image_names); i++, j++) {
if (!socinfo_image_names[i])
continue;
+ if (i == 0) {
+ versions = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+ SMEM_IMAGE_VERSION_TABLE,
+ &size);
+ } else if (i == 32) {
+ versions = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+ SMEM_IMAGE_VERSION_TABLE_2,
+ &size);
+ if (IS_ERR(versions))
+ break;
+
+ j = 0;
+ }
+
dentry = debugfs_create_dir(socinfo_image_names[i],
qcom_socinfo->dbg_root);
- debugfs_create_file("name", 0444, dentry, &versions[i],
+ debugfs_create_file("name", 0444, dentry, &versions[j],
&qcom_image_name_ops);
- debugfs_create_file("variant", 0444, dentry, &versions[i],
+ debugfs_create_file("variant", 0444, dentry, &versions[j],
&qcom_image_variant_ops);
- debugfs_create_file("oem", 0444, dentry, &versions[i],
+ debugfs_create_file("oem", 0444, dentry, &versions[j],
&qcom_image_oem_ops);
}
}
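
The loop above splits one logical image-version array across two SMEM items: indices 0-31 resolve against SMEM_IMAGE_VERSION_TABLE (469) and indices 32 and up against SMEM_IMAGE_VERSION_TABLE_2 (667), with j rebased to zero when the second item is fetched. A minimal sketch of that index mapping, using a hypothetical helper that is not part of the patch:

	/* Sketch: map a global image index to an (SMEM item, local index) pair. */
	#define IMAGE_TABLE_SPLIT	32

	static void image_index_to_smem(unsigned int i, unsigned int *item,
					unsigned int *local)
	{
		if (i < IMAGE_TABLE_SPLIT) {
			*item = 469;			/* SMEM_IMAGE_VERSION_TABLE */
			*local = i;
		} else {
			*item = 667;			/* SMEM_IMAGE_VERSION_TABLE_2 */
			*local = i - IMAGE_TABLE_SPLIT;	/* "j" in the loop above */
		}
	}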
diff --git a/drivers/soc/qcom/ubwc_config.c b/drivers/soc/qcom/ubwc_config.c
index 0e42d22b7224..1c25aaf55e52 100644
--- a/drivers/soc/qcom/ubwc_config.c
+++ b/drivers/soc/qcom/ubwc_config.c
@@ -16,6 +16,16 @@ static const struct qcom_ubwc_cfg_data no_ubwc_data = {
/* no UBWC, no HBB */
};
+static const struct qcom_ubwc_cfg_data kaanapali_data = {
+ .ubwc_enc_version = UBWC_6_0,
+ .ubwc_dec_version = UBWC_6_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
static const struct qcom_ubwc_cfg_data msm8937_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
@@ -218,11 +228,24 @@ static const struct qcom_ubwc_cfg_data x1e80100_data = {
.macrotile_mode = true,
};
+static const struct qcom_ubwc_cfg_data glymur_data = {
+ .ubwc_enc_version = UBWC_5_0,
+ .ubwc_dec_version = UBWC_5_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ /* TODO: highest_bank_bit = 15 for LP_DDR4 */
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,apq8016", .data = &no_ubwc_data },
{ .compatible = "qcom,apq8026", .data = &no_ubwc_data },
{ .compatible = "qcom,apq8074", .data = &no_ubwc_data },
{ .compatible = "qcom,apq8096", .data = &msm8998_data },
+ { .compatible = "qcom,kaanapali", .data = &kaanapali_data, },
+ { .compatible = "qcom,glymur", .data = &glymur_data},
{ .compatible = "qcom,msm8226", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8916", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8917", .data = &no_ubwc_data },
@@ -237,6 +260,7 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,msm8998", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290", .data = &qcm2290_data, },
{ .compatible = "qcom,qcm6490", .data = &sc7280_data, },
+ { .compatible = "qcom,qcs8300", .data = &sc8280xp_data, },
{ .compatible = "qcom,sa8155p", .data = &sm8150_data, },
{ .compatible = "qcom,sa8540p", .data = &sc8280xp_data, },
{ .compatible = "qcom,sa8775p", .data = &sa8775p_data, },
diff --git a/drivers/soc/renesas/r9a08g045-sysc.c b/drivers/soc/renesas/r9a08g045-sysc.c
index 0504d4e68761..03d653d5cde5 100644
--- a/drivers/soc/renesas/r9a08g045-sysc.c
+++ b/drivers/soc/renesas/r9a08g045-sysc.c
@@ -6,10 +6,29 @@
*/
#include <linux/bits.h>
+#include <linux/device.h>
#include <linux/init.h>
#include "rz-sysc.h"
+#define SYS_XSPI_MAP_STAADD_CS0 0x348
+#define SYS_XSPI_MAP_ENDADD_CS0 0x34c
+#define SYS_XSPI_MAP_STAADD_CS1 0x350
+#define SYS_XSPI_MAP_ENDADD_CS1 0x354
+#define SYS_GETH0_CFG 0x380
+#define SYS_GETH1_CFG 0x390
+#define SYS_PCIE_CFG 0x3a0
+#define SYS_PCIE_MON 0x3a4
+#define SYS_PCIE_ERR_MON 0x3ac
+#define SYS_PCIE_PHY 0x3b4
+#define SYS_I2C0_CFG 0x400
+#define SYS_I2C1_CFG 0x410
+#define SYS_I2C2_CFG 0x420
+#define SYS_I2C3_CFG 0x430
+#define SYS_I3C_CFG 0x440
+#define SYS_USB_PWRRDY 0xd70
+#define SYS_PCIE_RST_RSM_B 0xd74
+
static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initconst = {
.family = "RZ/G3S",
.id = 0x85e0447,
@@ -18,7 +37,57 @@ static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initc
.specific_id_mask = GENMASK(27, 0),
};
+static bool rzg3s_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_XSPI_MAP_STAADD_CS0:
+ case SYS_XSPI_MAP_ENDADD_CS0:
+ case SYS_XSPI_MAP_STAADD_CS1:
+ case SYS_XSPI_MAP_ENDADD_CS1:
+ case SYS_GETH0_CFG:
+ case SYS_GETH1_CFG:
+ case SYS_PCIE_CFG:
+ case SYS_PCIE_MON:
+ case SYS_PCIE_ERR_MON:
+ case SYS_PCIE_PHY:
+ case SYS_I2C0_CFG:
+ case SYS_I2C1_CFG:
+ case SYS_I2C2_CFG:
+ case SYS_I2C3_CFG:
+ case SYS_I3C_CFG:
+ case SYS_USB_PWRRDY:
+ case SYS_PCIE_RST_RSM_B:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzg3s_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_XSPI_MAP_STAADD_CS0:
+ case SYS_XSPI_MAP_ENDADD_CS0:
+ case SYS_XSPI_MAP_STAADD_CS1:
+ case SYS_XSPI_MAP_ENDADD_CS1:
+ case SYS_PCIE_CFG:
+ case SYS_PCIE_PHY:
+ case SYS_I2C0_CFG:
+ case SYS_I2C1_CFG:
+ case SYS_I2C2_CFG:
+ case SYS_I2C3_CFG:
+ case SYS_I3C_CFG:
+ case SYS_USB_PWRRDY:
+ case SYS_PCIE_RST_RSM_B:
+ return true;
+ default:
+ return false;
+ }
+}
+
const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
.soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
+ .readable_reg = rzg3s_regmap_readable_reg,
+ .writeable_reg = rzg3s_regmap_writeable_reg,
.max_register = 0xe20,
};
diff --git a/drivers/soc/renesas/r9a09g047-sys.c b/drivers/soc/renesas/r9a09g047-sys.c
index 2e8426c03050..e413b0eff9bf 100644
--- a/drivers/soc/renesas/r9a09g047-sys.c
+++ b/drivers/soc/renesas/r9a09g047-sys.c
@@ -29,6 +29,27 @@
#define SYS_LSI_PRR_CA55_DIS BIT(8)
#define SYS_LSI_PRR_NPU_DIS BIT(1)
+#define SYS_LSI_OTPTSU1TRMVAL0 0x330
+#define SYS_LSI_OTPTSU1TRMVAL1 0x334
+#define SYS_SPI_STAADDCS0 0x900
+#define SYS_SPI_ENDADDCS0 0x904
+#define SYS_SPI_STAADDCS1 0x908
+#define SYS_SPI_ENDADDCS1 0x90c
+#define SYS_VSP_CLK 0xe00
+#define SYS_GBETH0_CFG 0xf00
+#define SYS_GBETH1_CFG 0xf04
+#define SYS_PCIE_INTX_CH0 0x1000
+#define SYS_PCIE_MSI1_CH0 0x1004
+#define SYS_PCIE_MSI2_CH0 0x1008
+#define SYS_PCIE_MSI3_CH0 0x100c
+#define SYS_PCIE_MSI4_CH0 0x1010
+#define SYS_PCIE_MSI5_CH0 0x1014
+#define SYS_PCIE_PME_CH0 0x1018
+#define SYS_PCIE_ACK_CH0 0x101c
+#define SYS_PCIE_MISC_CH0 0x1020
+#define SYS_PCIE_MODE_CH0 0x1024
+#define SYS_ADC_CFG 0x1600
+
static void rzg3e_sys_print_id(struct device *dev,
void __iomem *sysc_base,
struct soc_device_attribute *soc_dev_attr)
@@ -62,7 +83,65 @@ static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initco
.print_id = rzg3e_sys_print_id,
};
+static bool rzg3e_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_LSI_OTPTSU1TRMVAL0:
+ case SYS_LSI_OTPTSU1TRMVAL1:
+ case SYS_SPI_STAADDCS0:
+ case SYS_SPI_ENDADDCS0:
+ case SYS_SPI_STAADDCS1:
+ case SYS_SPI_ENDADDCS1:
+ case SYS_VSP_CLK:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzg3e_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_SPI_STAADDCS0:
+ case SYS_SPI_ENDADDCS0:
+ case SYS_SPI_STAADDCS1:
+ case SYS_SPI_ENDADDCS1:
+ case SYS_VSP_CLK:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
const struct rz_sysc_init_data rzg3e_sys_init_data = {
.soc_id_init_data = &rzg3e_sys_soc_id_init_data,
+ .readable_reg = rzg3e_regmap_readable_reg,
+ .writeable_reg = rzg3e_regmap_writeable_reg,
.max_register = 0x170c,
};
diff --git a/drivers/soc/renesas/r9a09g056-sys.c b/drivers/soc/renesas/r9a09g056-sys.c
index 3ad1422eba36..42f5eff291fd 100644
--- a/drivers/soc/renesas/r9a09g056-sys.c
+++ b/drivers/soc/renesas/r9a09g056-sys.c
@@ -34,6 +34,24 @@
#define SYS_RZV2N_FEATURE_C55 BIT(1)
#define SYS_RZV2N_FEATURE_SEC BIT(2)
+#define SYS_LSI_OTPTSU0TRMVAL0 0x320
+#define SYS_LSI_OTPTSU0TRMVAL1 0x324
+#define SYS_LSI_OTPTSU1TRMVAL0 0x330
+#define SYS_LSI_OTPTSU1TRMVAL1 0x334
+#define SYS_GBETH0_CFG 0xf00
+#define SYS_GBETH1_CFG 0xf04
+#define SYS_PCIE_INTX_CH0 0x1000
+#define SYS_PCIE_MSI1_CH0 0x1004
+#define SYS_PCIE_MSI2_CH0 0x1008
+#define SYS_PCIE_MSI3_CH0 0x100c
+#define SYS_PCIE_MSI4_CH0 0x1010
+#define SYS_PCIE_MSI5_CH0 0x1014
+#define SYS_PCIE_PME_CH0 0x1018
+#define SYS_PCIE_ACK_CH0 0x101c
+#define SYS_PCIE_MISC_CH0 0x1020
+#define SYS_PCIE_MODE_CH0 0x1024
+#define SYS_ADC_CFG 0x1600
+
static void rzv2n_sys_print_id(struct device *dev,
void __iomem *sysc_base,
struct soc_device_attribute *soc_dev_attr)
@@ -70,6 +88,57 @@ static const struct rz_sysc_soc_id_init_data rzv2n_sys_soc_id_init_data __initco
.print_id = rzv2n_sys_print_id,
};
+static bool rzv2n_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_LSI_OTPTSU0TRMVAL0:
+ case SYS_LSI_OTPTSU0TRMVAL1:
+ case SYS_LSI_OTPTSU1TRMVAL0:
+ case SYS_LSI_OTPTSU1TRMVAL1:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzv2n_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
const struct rz_sysc_init_data rzv2n_sys_init_data = {
.soc_id_init_data = &rzv2n_sys_soc_id_init_data,
+ .readable_reg = rzv2n_regmap_readable_reg,
+ .writeable_reg = rzv2n_regmap_writeable_reg,
+ .max_register = 0x170c,
};
diff --git a/drivers/soc/renesas/r9a09g057-sys.c b/drivers/soc/renesas/r9a09g057-sys.c
index e3390e7c7fe5..827c718ac7c5 100644
--- a/drivers/soc/renesas/r9a09g057-sys.c
+++ b/drivers/soc/renesas/r9a09g057-sys.c
@@ -29,6 +29,35 @@
#define SYS_LSI_PRR_GPU_DIS BIT(0)
#define SYS_LSI_PRR_ISP_DIS BIT(4)
+#define SYS_LSI_OTPTSU0TRMVAL0 0x320
+#define SYS_LSI_OTPTSU0TRMVAL1 0x324
+#define SYS_LSI_OTPTSU1TRMVAL0 0x330
+#define SYS_LSI_OTPTSU1TRMVAL1 0x334
+#define SYS_GBETH0_CFG 0xf00
+#define SYS_GBETH1_CFG 0xf04
+#define SYS_PCIE_INTX_CH0 0x1000
+#define SYS_PCIE_MSI1_CH0 0x1004
+#define SYS_PCIE_MSI2_CH0 0x1008
+#define SYS_PCIE_MSI3_CH0 0x100c
+#define SYS_PCIE_MSI4_CH0 0x1010
+#define SYS_PCIE_MSI5_CH0 0x1014
+#define SYS_PCIE_PME_CH0 0x1018
+#define SYS_PCIE_ACK_CH0 0x101c
+#define SYS_PCIE_MISC_CH0 0x1020
+#define SYS_PCIE_MODE_CH0 0x1024
+#define SYS_PCIE_INTX_CH1 0x1030
+#define SYS_PCIE_MSI1_CH1 0x1034
+#define SYS_PCIE_MSI2_CH1 0x1038
+#define SYS_PCIE_MSI3_CH1 0x103c
+#define SYS_PCIE_MSI4_CH1 0x1040
+#define SYS_PCIE_MSI5_CH1 0x1044
+#define SYS_PCIE_PME_CH1 0x1048
+#define SYS_PCIE_ACK_CH1 0x104c
+#define SYS_PCIE_MISC_CH1 0x1050
+#define SYS_PCIE_MODE_CH1 0x1054
+#define SYS_PCIE_MODE 0x1060
+#define SYS_ADC_CFG 0x1600
+
static void rzv2h_sys_print_id(struct device *dev,
void __iomem *sysc_base,
struct soc_device_attribute *soc_dev_attr)
@@ -62,7 +91,79 @@ static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initco
.print_id = rzv2h_sys_print_id,
};
+static bool rzv2h_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_LSI_OTPTSU0TRMVAL0:
+ case SYS_LSI_OTPTSU0TRMVAL1:
+ case SYS_LSI_OTPTSU1TRMVAL0:
+ case SYS_LSI_OTPTSU1TRMVAL1:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_PCIE_INTX_CH1:
+ case SYS_PCIE_MSI1_CH1:
+ case SYS_PCIE_MSI2_CH1:
+ case SYS_PCIE_MSI3_CH1:
+ case SYS_PCIE_MSI4_CH1:
+ case SYS_PCIE_MSI5_CH1:
+ case SYS_PCIE_PME_CH1:
+ case SYS_PCIE_ACK_CH1:
+ case SYS_PCIE_MISC_CH1:
+ case SYS_PCIE_MODE_CH1:
+ case SYS_PCIE_MODE:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzv2h_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_PCIE_INTX_CH1:
+ case SYS_PCIE_MSI1_CH1:
+ case SYS_PCIE_MSI2_CH1:
+ case SYS_PCIE_MSI3_CH1:
+ case SYS_PCIE_MSI4_CH1:
+ case SYS_PCIE_MSI5_CH1:
+ case SYS_PCIE_PME_CH1:
+ case SYS_PCIE_ACK_CH1:
+ case SYS_PCIE_MISC_CH1:
+ case SYS_PCIE_MODE_CH1:
+ case SYS_PCIE_MODE:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
const struct rz_sysc_init_data rzv2h_sys_init_data = {
.soc_id_init_data = &rzv2h_sys_soc_id_init_data,
+ .readable_reg = rzv2h_regmap_readable_reg,
+ .writeable_reg = rzv2h_regmap_writeable_reg,
.max_register = 0x170c,
};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 7ba02f3a4a4f..0541990901fc 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -12,6 +12,7 @@
#define WDTRSTCR_RESET 0xA55A0002
#define WDTRSTCR 0x0054
+#define GEN4_WDTRSTCR_RESET 0xA55A8002
#define GEN4_WDTRSTCR 0x0010
#define CR7BAR 0x0070
@@ -30,7 +31,7 @@ static int rcar_rst_enable_wdt_reset(void __iomem *base)
static int rcar_rst_v3u_enable_wdt_reset(void __iomem *base)
{
- iowrite32(WDTRSTCR_RESET, base + GEN4_WDTRSTCR);
+ iowrite32(GEN4_WDTRSTCR_RESET, base + GEN4_WDTRSTCR);
return 0;
}
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 1eb52356b996..ee4f17bb4db4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2014-2016 Glider bvba
*/
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -524,8 +525,7 @@ static int __init renesas_soc_init(void)
eshi, eslo);
}
- if (soc->id &&
- ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
+ if (soc->id && field_get(id->mask, product) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
ret = -ENODEV;
goto free_soc_dev_attr;
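
field_get() from <linux/bitfield.h> extracts a masked field and shifts it down to bit 0, which is exactly what the removed open-coded expression did; unlike FIELD_GET(), it also accepts a non-constant mask. For example:

	/* field_get(mask, reg) == (reg & mask) >> __ffs(mask) */
	u32 part = field_get(GENMASK(11, 4), 0x5a3);	/* part == 0x5a */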
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
index 9f79e299e6f4..ae727d9c8cc5 100644
--- a/drivers/soc/renesas/rz-sysc.c
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2024 Renesas Electronics Corp.
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -16,8 +17,6 @@
#include "rz-sysc.h"
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
/**
* struct rz_sysc - RZ SYSC private data structure
* @base: SYSC base address
@@ -140,6 +139,8 @@ static int rz_sysc_probe(struct platform_device *pdev)
regmap_cfg->val_bits = 32;
regmap_cfg->fast_io = true;
regmap_cfg->max_register = data->max_register;
+ regmap_cfg->readable_reg = data->readable_reg;
+ regmap_cfg->writeable_reg = data->writeable_reg;
regmap = devm_regmap_init_mmio(dev, sysc->base, regmap_cfg);
if (IS_ERR(regmap))
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
index 8eec355d5d56..88929bf21cb1 100644
--- a/drivers/soc/renesas/rz-sysc.h
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -34,10 +34,14 @@ struct rz_sysc_soc_id_init_data {
/**
* struct rz_sysc_init_data - RZ SYSC initialization data
* @soc_id_init_data: RZ SYSC SoC ID initialization data
+ * @writeable_reg: Regmap writeable register check function
+ * @readable_reg: Regmap readable register check function
* @max_register: Maximum SYSC register offset to be used by the regmap config
*/
struct rz_sysc_init_data {
const struct rz_sysc_soc_id_init_data *soc_id_init_data;
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
u32 max_register;
};
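
With readable_reg/writeable_reg wired into the regmap config, syscon consumers of the SYSC are confined to the listed offsets and regmap rejects everything else. A short sketch, assuming a consumer that has already resolved the RZ/G3S SYSC regmap:

	u32 val;

	/* Succeeds: SYS_PCIE_CFG is in both access lists. */
	regmap_read(sysc_regmap, SYS_PCIE_CFG, &val);

	/* Fails with -EIO: offset 0 is in neither list. */
	regmap_write(sysc_regmap, 0x0, 0);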
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 344870da7675..27bfa09ff251 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -91,6 +91,7 @@ static const struct rockchip_grf_info rk3328_grf __initconst = {
static const struct rockchip_grf_value rk3368_defaults[] __initconst = {
{ "jtag switching", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(13), 0) },
+ { "pwm select", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(12), 1) },
};
static const struct rockchip_grf_info rk3368_grf __initconst = {
@@ -98,6 +99,17 @@ static const struct rockchip_grf_info rk3368_grf __initconst = {
.num_values = ARRAY_SIZE(rk3368_defaults),
};
+#define RK3368_PMUGRF_SOC_CON0 0x100
+
+static const struct rockchip_grf_value rk3368_pmugrf_defaults[] __initconst = {
+ { "pwm2 select", RK3368_PMUGRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(7), 0) },
+};
+
+static const struct rockchip_grf_info rk3368_pmugrf __initconst = {
+ .values = rk3368_pmugrf_defaults,
+ .num_values = ARRAY_SIZE(rk3368_pmugrf_defaults),
+};
+
#define RK3399_GRF_SOC_CON7 0xe21c
static const struct rockchip_grf_value rk3399_defaults[] __initconst = {
@@ -176,6 +188,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
.compatible = "rockchip,rk3368-grf",
.data = (void *)&rk3368_grf,
}, {
+ .compatible = "rockchip,rk3368-pmugrf",
+ .data = (void *)&rk3368_pmugrf,
+ }, {
.compatible = "rockchip,rk3399-grf",
.data = (void *)&rk3399_grf,
}, {
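
FIELD_PREP_WM16_CONST() encodes the "hiword mask" format used by Rockchip GRF registers: the field value sits in the low 16 bits and the same mask bits are set in the upper 16 bits as a write enable, so unmasked bits keep their value. Assuming that encoding, the new pwm-select default expands as:

	/*
	 * FIELD_PREP_WM16_CONST(BIT(12), 1)
	 *	== FIELD_PREP(BIT(12), 1) | (BIT(12) << 16)
	 *	== 0x00001000 | 0x10000000
	 *	== 0x10001000	-> updates bit 12 only
	 */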
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 248a33d7754a..636a762608c9 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -6,7 +6,8 @@ exynos_chipid-y += exynos-chipid.o exynos-asv.o
obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o
-obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
+obj-$(CONFIG_EXYNOS_PMU) += exynos_pmu.o
+exynos_pmu-y += exynos-pmu.o gs101-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index c86f1058ceed..d3b4b5508e0c 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -57,11 +57,13 @@ static const struct exynos_soc_id {
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
{ "EXYNOS7870", 0xE7870000 },
+ { "EXYNOS8890", 0xE8890000 },
/* Compatible with: samsung,exynos850-chipid */
{ "EXYNOS2200", 0xE9925000 },
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
{ "EXYNOS8895", 0xE8895000 },
+ { "EXYNOS9610", 0xE9610000 },
{ "EXYNOS9810", 0xE9810000 },
{ "EXYNOS990", 0xE9830000 },
{ "EXYNOSAUTOV9", 0xAAA80000 },
@@ -107,16 +109,17 @@ static int exynos_chipid_probe(struct platform_device *pdev)
const struct exynos_chipid_variant *drv_data;
struct exynos_chipid_info soc_info;
struct soc_device_attribute *soc_dev_attr;
+ struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct device_node *root;
struct regmap *regmap;
int ret;
- drv_data = of_device_get_match_data(&pdev->dev);
+ drv_data = of_device_get_match_data(dev);
if (!drv_data)
return -EINVAL;
- regmap = device_node_to_regmap(pdev->dev.of_node);
+ regmap = device_node_to_regmap(dev->of_node);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -124,8 +127,7 @@ static int exynos_chipid_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
- GFP_KERNEL);
+ soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -135,8 +137,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
of_property_read_string(root, "model", &soc_dev_attr->machine);
of_node_put(root);
- soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%x", soc_info.revision);
+ soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%x",
+ soc_info.revision);
if (!soc_dev_attr->revision)
return -ENOMEM;
soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
@@ -150,13 +152,13 @@ static int exynos_chipid_probe(struct platform_device *pdev)
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
- ret = exynos_asv_init(&pdev->dev, regmap);
+ ret = exynos_asv_init(dev, regmap);
if (ret)
goto err;
platform_set_drvdata(pdev, soc_dev);
- dev_info(&pdev->dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ dev_info(dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
soc_dev_attr->soc_id, soc_info.product_id, soc_info.revision);
return 0;
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 22c50ca2aa79..d58376c38179 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -6,7 +6,6 @@
// Exynos - CPU PMU(Power Management Unit) support
#include <linux/array_size.h>
-#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu_pm.h>
@@ -25,14 +24,6 @@
#include "exynos-pmu.h"
-#define PMUALIVE_MASK GENMASK(13, 0)
-#define TENSOR_SET_BITS (BIT(15) | BIT(14))
-#define TENSOR_CLR_BITS BIT(15)
-#define TENSOR_SMC_PMU_SEC_REG 0x82000504
-#define TENSOR_PMUREG_READ 0
-#define TENSOR_PMUREG_WRITE 1
-#define TENSOR_PMUREG_RMW 2
-
struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
@@ -54,125 +45,6 @@ static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;
-/*
- * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
- * from EL3, but are still read accessible. As Linux needs to write some of
- * these registers, the following functions are provided and exposed via
- * regmap.
- *
- * Note: This SMC interface is known to be implemented on gs101 and derivative
- * SoCs.
- */
-
-/* Write to a protected PMU register. */
-static int tensor_sec_reg_write(void *context, unsigned int reg,
- unsigned int val)
-{
- struct arm_smccc_res res;
- unsigned long pmu_base = (unsigned long)context;
-
- arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
- TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);
-
- /* returns -EINVAL if access isn't allowed or 0 */
- if (res.a0)
- pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
-
- return (int)res.a0;
-}
-
-/* Read/Modify/Write a protected PMU register. */
-static int tensor_sec_reg_rmw(void *context, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- struct arm_smccc_res res;
- unsigned long pmu_base = (unsigned long)context;
-
- arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
- TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);
-
- /* returns -EINVAL if access isn't allowed or 0 */
- if (res.a0)
- pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
-
- return (int)res.a0;
-}
-
-/*
- * Read a protected PMU register. All PMU registers can be read by Linux.
- * Note: The SMC read register is not used, as only registers that can be
- * written are readable via SMC.
- */
-static int tensor_sec_reg_read(void *context, unsigned int reg,
- unsigned int *val)
-{
- *val = pmu_raw_readl(reg);
- return 0;
-}
-
-/*
- * For SoCs that have set/clear bit hardware this function can be used when
- * the PMU register will be accessed by multiple masters.
- *
- * For example, to set bits 13:8 in PMU reg offset 0x3e80
- * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
- *
- * Set bit 8, and clear bits 13:9 PMU reg offset 0x3e80
- * tensor_set_bits_atomic(0x3e80, 0x100, 0x3f00);
- */
-static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
- u32 mask)
-{
- int ret;
- unsigned int i;
-
- for (i = 0; i < 32; i++) {
- if (!(mask & BIT(i)))
- continue;
-
- offset &= ~TENSOR_SET_BITS;
-
- if (val & BIT(i))
- offset |= TENSOR_SET_BITS;
- else
- offset |= TENSOR_CLR_BITS;
-
- ret = tensor_sec_reg_write(ctx, offset, i);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static bool tensor_is_atomic(unsigned int reg)
-{
- /*
- * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
- * as the target registers can be accessed by multiple masters. SFRs
- * that don't support atomic are added to the switch statement below.
- */
- if (reg > PMUALIVE_MASK)
- return false;
-
- switch (reg) {
- case GS101_SYSIP_DAT0:
- case GS101_SYSTEM_CONFIGURATION:
- return false;
- default:
- return true;
- }
-}
-
-static int tensor_sec_update_bits(void *ctx, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
-
- if (!tensor_is_atomic(reg))
- return tensor_sec_reg_rmw(ctx, reg, mask, val);
-
- return tensor_set_bits_atomic(ctx, reg, val, mask);
-}
-
void pmu_raw_writel(u32 val, u32 offset)
{
writel_relaxed(val, pmu_base_addr + offset);
@@ -244,11 +116,6 @@ static const struct regmap_config regmap_pmu_intr = {
.use_raw_spinlock = true,
};
-static const struct exynos_pmu_data gs101_pmu_data = {
- .pmu_secure = true,
- .pmu_cpuhp = true,
-};
-
/*
* PMU platform driver and devicetree bindings.
*/
@@ -346,6 +213,8 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
+ put_device(dev);
+
return syscon_node_to_regmap(pmu_np);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
@@ -364,6 +233,7 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
* disabled and cpupm_lock held.
*/
static int __gs101_cpu_pmu_online(unsigned int cpu)
+ __must_hold(&pmu_context->cpupm_lock)
{
unsigned int cpuhint = smp_processor_id();
u32 reg, mask;
@@ -424,6 +294,7 @@ static int gs101_cpuhp_pmu_online(unsigned int cpu)
/* Common function shared by both CPU hot plug and CPUIdle */
static int __gs101_cpu_pmu_offline(unsigned int cpu)
+ __must_hold(&pmu_context->cpupm_lock)
{
unsigned int cpuhint = smp_processor_id();
u32 reg, mask;
@@ -585,10 +456,6 @@ static int setup_cpuhp_and_cpuidle(struct device *dev)
if (!pmu_context->in_cpuhp)
return -ENOMEM;
- raw_spin_lock_init(&pmu_context->cpupm_lock);
- pmu_context->sys_inreboot = false;
- pmu_context->sys_insuspend = false;
-
/* set PMU to power on */
for_each_online_cpu(cpu)
gs101_cpuhp_pmu_online(cpu);
@@ -635,6 +502,9 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_regmcfg = regmap_smccfg;
pmu_regmcfg.max_register = resource_size(res) -
pmu_regmcfg.reg_stride;
+ pmu_regmcfg.wr_table = pmu_context->pmu_data->wr_table;
+ pmu_regmcfg.rd_table = pmu_context->pmu_data->rd_table;
+
/* Need physical address for SMC call */
regmap = devm_regmap_init(dev, NULL,
(void *)(uintptr_t)res->start,
@@ -657,6 +527,9 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_context->pmureg = regmap;
pmu_context->dev = dev;
+ raw_spin_lock_init(&pmu_context->cpupm_lock);
+ pmu_context->sys_inreboot = false;
+ pmu_context->sys_insuspend = false;
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
ret = setup_cpuhp_and_cpuidle(dev);
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 0938bb4fe15f..fbe381e2a2e1 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -13,11 +13,38 @@
#define PMU_TABLE_END (-1U)
+struct regmap_access_table;
+
struct exynos_pmu_conf {
unsigned int offset;
u8 val[NUM_SYS_POWERDOWN];
};
+/**
+ * struct exynos_pmu_data - of_device_id (match) data
+ *
+ * @pmu_config: Optional table detailing register writes for target system
+ * states: SYS_AFTR, SYS_LPA, SYS_SLEEP.
+ * @pmu_config_extra: Optional secondary table detailing additional register
+ * writes for target system states: SYS_AFTR, SYS_LPA,
+ * SYS_SLEEP.
+ * @pmu_secure: Whether or not PMU register writes need to be done via SMC call.
+ * @pmu_cpuhp: Whether or not extra handling is required for CPU hotplug and
+ * CPUidle outside of standard PSCI calls, due to non-compliant
+ * firmware.
+ * @pmu_init: Optional init function.
+ * @powerdown_conf: Optional callback before entering target system states:
+ * SYS_AFTR, SYS_LPA, SYS_SLEEP. This will be invoked before
+ * the registers from @pmu_config are written.
+ * @powerdown_conf_extra: Optional secondary callback before entering
+ * target system states: SYS_AFTR, SYS_LPA, SYS_SLEEP.
+ * This will be invoked after @pmu_config registers have
+ * been written.
+ * @rd_table: A table of readable register ranges in case a custom regmap is
+ *            used (i.e. when @pmu_secure is %true).
+ * @wr_table: A table of writable register ranges in case a custom regmap is
+ *            used (i.e. when @pmu_secure is %true).
+ */
struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
const struct exynos_pmu_conf *pmu_config_extra;
@@ -27,6 +54,9 @@ struct exynos_pmu_data {
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
void (*powerdown_conf_extra)(enum sys_powerdown);
+
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *wr_table;
};
extern void __iomem *pmu_base_addr;
@@ -40,7 +70,14 @@ extern const struct exynos_pmu_data exynos4412_pmu_data;
extern const struct exynos_pmu_data exynos5250_pmu_data;
extern const struct exynos_pmu_data exynos5420_pmu_data;
#endif
+extern const struct exynos_pmu_data gs101_pmu_data;
extern void pmu_raw_writel(u32 val, u32 offset);
extern u32 pmu_raw_readl(u32 offset);
+
+int tensor_sec_reg_write(void *context, unsigned int reg, unsigned int val);
+int tensor_sec_reg_read(void *context, unsigned int reg, unsigned int *val);
+int tensor_sec_update_bits(void *context, unsigned int reg, unsigned int mask,
+ unsigned int val);
+
#endif /* __EXYNOS_PMU_H */
diff --git a/drivers/soc/samsung/gs101-pmu.c b/drivers/soc/samsung/gs101-pmu.c
new file mode 100644
index 000000000000..17dadc1b9c6e
--- /dev/null
+++ b/drivers/soc/samsung/gs101-pmu.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Linaro Ltd.
+ *
+ * GS101 PMU (Power Management Unit) support
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/array_size.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/regmap.h>
+
+#include "exynos-pmu.h"
+
+#define PMUALIVE_MASK GENMASK(13, 0)
+#define TENSOR_SET_BITS (BIT(15) | BIT(14))
+#define TENSOR_CLR_BITS BIT(15)
+#define TENSOR_SMC_PMU_SEC_REG 0x82000504
+#define TENSOR_PMUREG_READ 0
+#define TENSOR_PMUREG_WRITE 1
+#define TENSOR_PMUREG_RMW 2
+
+static const struct regmap_range gs101_pmu_registers[] = {
+ regmap_reg_range(GS101_OM_STAT, GS101_SYSTEM_INFO),
+ regmap_reg_range(GS101_IDLE_IP(0), GS101_IDLE_IP_MASK(3)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(0),
+ GS101_PPMPURAM_INFORM_SCL_CH(3)),
+ regmap_reg_range(GS101_INFORM0, GS101_SYSIP_DAT(0)),
+ /* skip SYSIP_DAT1 SYSIP_DAT2 */
+ regmap_reg_range(GS101_SYSIP_DAT(3), GS101_PWR_HOLD_SW_TRIP),
+ regmap_reg_range(GS101_GSA_INFORM(0), GS101_GSA_INFORM(1)),
+ regmap_reg_range(GS101_INFORM4, GS101_IROM_INFORM),
+ regmap_reg_range(GS101_IROM_CPU_INFORM(0), GS101_IROM_CPU_INFORM(7)),
+ regmap_reg_range(GS101_PMU_SPARE(0), GS101_PMU_SPARE(3)),
+ /* skip most IROM_xxx registers */
+ regmap_reg_range(GS101_DREX_CALIBRATION(0), GS101_DREX_CALIBRATION(7)),
+
+#define CLUSTER_CPU_RANGE(cl, cpu) \
+ regmap_reg_range(GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu), \
+ GS101_CLUSTER_CPU_OPTION(cl, cpu)), \
+ regmap_reg_range(GS101_CLUSTER_CPU_OUT(cl, cpu), \
+ GS101_CLUSTER_CPU_IN(cl, cpu)), \
+ regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \
+ GS101_CLUSTER_CPU_INT_DIR(cl, cpu))
+
+ /* cluster 0..2 and cpu 0..3 or 0..1 */
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 2),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 3),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 1),
+#undef CLUSTER_CPU_RANGE
+
+#define CLUSTER_NONCPU_RANGE(cl) \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_CONFIGURATION(cl), \
+ GS101_CLUSTER_NONCPU_OPTION(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_OUT(cl), \
+ GS101_CLUSTER_NONCPU_IN(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_INT_IN(cl), \
+ GS101_CLUSTER_NONCPU_INT_DIR(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl), \
+ GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl), \
+ GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl))
+
+ CLUSTER_NONCPU_RANGE(0),
+ regmap_reg_range(GS101_CLUSTER0_NONCPU_DSU_PCH,
+ GS101_CLUSTER0_NONCPU_DSU_PCH),
+ CLUSTER_NONCPU_RANGE(1),
+ CLUSTER_NONCPU_RANGE(2),
+#undef CLUSTER_NONCPU_RANGE
+
+#define SUBBLK_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_CONFIGURATION(blk), \
+ GS101_SUBBLK_CTRL(blk)), \
+ regmap_reg_range(GS101_SUBBLK_OUT(blk), GS101_SUBBLK_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_INT_IN(blk), \
+ GS101_SUBBLK_INT_DIR(blk)), \
+ regmap_reg_range(GS101_SUBBLK_MEMORY_OUT(blk), \
+ GS101_SUBBLK_MEMORY_IN(blk))
+
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ALIVE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_AOC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_APM),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CMU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CORE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EH),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DISP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G2D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MFC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CSIS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PDP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DNS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3AA),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_IPP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ITP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MCSC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_GDC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TNR),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BO),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF3),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MISC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_S2D),
+#undef SUBBLK_RANGE
+
+#define SUBBLK_CPU_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_CPU_CONFIGURATION(blk), \
+ GS101_SUBBLK_CPU_OPTION(blk)), \
+ regmap_reg_range(GS101_SUBBLK_CPU_OUT(blk), \
+ GS101_SUBBLK_CPU_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_CPU_INT_IN(blk), \
+ GS101_SUBBLK_CPU_INT_DIR(blk))
+
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_APM),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_DBGCORE),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_SSS),
+#undef SUBBLK_CPU_RANGE
+
+ regmap_reg_range(GS101_MIF_CONFIGURATION, GS101_MIF_CTRL),
+ regmap_reg_range(GS101_MIF_OUT, GS101_MIF_IN),
+ regmap_reg_range(GS101_MIF_INT_IN, GS101_MIF_INT_DIR),
+ regmap_reg_range(GS101_TOP_CONFIGURATION, GS101_TOP_OPTION),
+ regmap_reg_range(GS101_TOP_OUT, GS101_TOP_IN),
+ regmap_reg_range(GS101_TOP_INT_IN, GS101_WAKEUP2_STAT),
+ regmap_reg_range(GS101_WAKEUP2_INT_IN, GS101_WAKEUP2_INT_DIR),
+ regmap_reg_range(GS101_SYSTEM_CONFIGURATION, GS101_USER_DEFINED_OUT),
+ regmap_reg_range(GS101_SYSTEM_OUT, GS101_SYSTEM_IN),
+ regmap_reg_range(GS101_SYSTEM_INT_IN, GS101_EINT_WAKEUP_MASK3),
+ regmap_reg_range(GS101_USER_DEFINED_INT_IN, GS101_SCAN2DRAM_INT_DIR),
+ /* skip HCU_START */
+ regmap_reg_range(GS101_CUSTOM_OUT, GS101_CUSTOM_IN),
+ regmap_reg_range(GS101_CUSTOM_INT_IN, GS101_CUSTOM_INT_DIR),
+ regmap_reg_range(GS101_ACK_LAST_CPU, GS101_HCU_R(3)),
+ regmap_reg_range(GS101_HCU_SP, GS101_HCU_PC),
+ /* skip PMU_RAM_CTRL */
+ regmap_reg_range(GS101_APM_HCU_CTRL, GS101_APM_HCU_CTRL),
+ regmap_reg_range(GS101_APM_NMI_ENABLE, GS101_RST_STAT_PMU),
+ regmap_reg_range(GS101_HPM_INT_IN, GS101_BOOT_STAT),
+ regmap_reg_range(GS101_PMLINK_OUT, GS101_PMLINK_AOC_CTRL),
+ regmap_reg_range(GS101_TCXO_BUF_CTRL, GS101_ADD_CTRL),
+ regmap_reg_range(GS101_HCU_TIMEOUT_RESET, GS101_HCU_TIMEOUT_SCAN2DRAM),
+ regmap_reg_range(GS101_TIMER(0), GS101_TIMER(3)),
+ regmap_reg_range(GS101_PPC_MIF(0), GS101_PPC_EH),
+ /* PPC_OFFSET, skip PPC_CPUCL1_0 PPC_CPUCL1_1 */
+ regmap_reg_range(GS101_EXT_REGULATOR_MIF_DURATION, GS101_TCXO_DURATION),
+ regmap_reg_range(GS101_BURNIN_CTRL, GS101_TMU_SUB_TRIP),
+ regmap_reg_range(GS101_MEMORY_CEN, GS101_MEMORY_SMX_FEEDBACK),
+ regmap_reg_range(GS101_SLC_PCH_CHANNEL, GS101_SLC_PCH_CB),
+ regmap_reg_range(GS101_FORCE_NOMC, GS101_FORCE_NOMC),
+ regmap_reg_range(GS101_FORCE_BOOST, GS101_PMLINK_SLC_BUSY),
+ regmap_reg_range(GS101_BOOTSYNC_OUT, GS101_CTRL_SECJTAG_ALIVE),
+ regmap_reg_range(GS101_CTRL_DIV_PLL_ALV_DIVLOW, GS101_CTRL_CLKDIV__CLKRTC),
+ regmap_reg_range(GS101_CTRL_SOC32K, GS101_CTRL_SBU_SW_EN),
+ regmap_reg_range(GS101_PAD_CTRL_CLKOUT0, GS101_PAD_CTRL_WRESETO_n),
+ regmap_reg_range(GS101_PHY_CTRL_USB20, GS101_PHY_CTRL_UFS),
+};
+
+static const struct regmap_range gs101_pmu_ro_registers[] = {
+ regmap_reg_range(GS101_OM_STAT, GS101_VERSION),
+ regmap_reg_range(GS101_OTP_STATUS, GS101_OTP_STATUS),
+
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(0),
+ GS101_PPMPURAM_STATE_SLC_CH(0)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(1),
+ GS101_PPMPURAM_STATE_SLC_CH(1)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(2),
+ GS101_PPMPURAM_STATE_SLC_CH(2)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(3),
+ GS101_PPMPURAM_STATE_SLC_CH(3)),
+
+#define CLUSTER_CPU_RANGE(cl, cpu) \
+ regmap_reg_range(GS101_CLUSTER_CPU_IN(cl, cpu), \
+ GS101_CLUSTER_CPU_IN(cl, cpu)), \
+ regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \
+ GS101_CLUSTER_CPU_INT_IN(cl, cpu))
+
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 2),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 3),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 1),
+#undef CLUSTER_CPU_RANGE
+
+#define CLUSTER_NONCPU_RANGE(cl) \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_IN(cl), \
+ GS101_CLUSTER_NONCPU_IN(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_INT_IN(cl), \
+ GS101_CLUSTER_NONCPU_INT_IN(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl), \
+ GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl))
+
+ CLUSTER_NONCPU_RANGE(0),
+ CLUSTER_NONCPU_RANGE(1),
+ CLUSTER_NONCPU_RANGE(2),
+ regmap_reg_range(GS101_CLUSTER_NONCPU_INT_EN(2),
+ GS101_CLUSTER_NONCPU_INT_DIR(2)),
+#undef CLUSTER_NONCPU_RANGE
+
+#define SUBBLK_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_IN(blk), GS101_SUBBLK_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_INT_IN(blk), \
+ GS101_SUBBLK_INT_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_MEMORY_IN(blk), \
+ GS101_SUBBLK_MEMORY_IN(blk))
+
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ALIVE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_AOC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_APM),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CMU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CORE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EH),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DISP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G2D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MFC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CSIS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PDP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DNS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3AA),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_IPP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ITP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MCSC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_GDC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TNR),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BO),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF3),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MISC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_S2D),
+#undef SUBBLK_RANGE
+
+#define SUBBLK_CPU_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_CPU_IN(blk), \
+ GS101_SUBBLK_CPU_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_CPU_INT_IN(blk), \
+ GS101_SUBBLK_CPU_INT_IN(blk))
+
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_APM),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_DBGCORE),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_SSS),
+#undef SUBBLK_CPU_RANGE
+
+ regmap_reg_range(GS101_MIF_CONFIGURATION, GS101_MIF_CONFIGURATION),
+ regmap_reg_range(GS101_MIF_IN, GS101_MIF_IN),
+ regmap_reg_range(GS101_MIF_INT_IN, GS101_MIF_INT_IN),
+ regmap_reg_range(GS101_TOP_IN, GS101_TOP_IN),
+ regmap_reg_range(GS101_TOP_INT_IN, GS101_TOP_INT_IN),
+ regmap_reg_range(GS101_WAKEUP2_INT_IN, GS101_WAKEUP2_INT_IN),
+ regmap_reg_range(GS101_SYSTEM_IN, GS101_SYSTEM_IN),
+ regmap_reg_range(GS101_SYSTEM_INT_IN, GS101_SYSTEM_INT_IN),
+ regmap_reg_range(GS101_EINT_INT_IN, GS101_EINT_INT_IN),
+ regmap_reg_range(GS101_EINT2_INT_IN, GS101_EINT2_INT_IN),
+ regmap_reg_range(GS101_EINT3_INT_IN, GS101_EINT3_INT_IN),
+ regmap_reg_range(GS101_USER_DEFINED_INT_IN, GS101_USER_DEFINED_INT_IN),
+ regmap_reg_range(GS101_SCAN2DRAM_INT_IN, GS101_SCAN2DRAM_INT_IN),
+ regmap_reg_range(GS101_CUSTOM_IN, GS101_CUSTOM_IN),
+ regmap_reg_range(GS101_CUSTOM_INT_IN, GS101_CUSTOM_INT_IN),
+ regmap_reg_range(GS101_HCU_R(0), GS101_HCU_R(3)),
+ regmap_reg_range(GS101_HCU_SP, GS101_HCU_PC),
+ regmap_reg_range(GS101_NMI_SRC_IN, GS101_NMI_SRC_IN),
+ regmap_reg_range(GS101_HPM_INT_IN, GS101_HPM_INT_IN),
+ regmap_reg_range(GS101_MEMORY_PGEN_FEEDBACK, GS101_MEMORY_PGEN_FEEDBACK),
+ regmap_reg_range(GS101_MEMORY_SMX_FEEDBACK, GS101_MEMORY_SMX_FEEDBACK),
+ regmap_reg_range(GS101_PMLINK_SLC_ACK, GS101_PMLINK_SLC_BUSY),
+ regmap_reg_range(GS101_BOOTSYNC_IN, GS101_BOOTSYNC_IN),
+ regmap_reg_range(GS101_SCAN_READY_IN, GS101_SCAN_READY_IN),
+ regmap_reg_range(GS101_CTRL_PLL_ALV_LOCK, GS101_CTRL_PLL_ALV_LOCK),
+};
+
+static const struct regmap_access_table gs101_pmu_rd_table = {
+ .yes_ranges = gs101_pmu_registers,
+ .n_yes_ranges = ARRAY_SIZE(gs101_pmu_registers),
+};
+
+static const struct regmap_access_table gs101_pmu_wr_table = {
+ .yes_ranges = gs101_pmu_registers,
+ .n_yes_ranges = ARRAY_SIZE(gs101_pmu_registers),
+ .no_ranges = gs101_pmu_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(gs101_pmu_ro_registers),
+};
+
+const struct exynos_pmu_data gs101_pmu_data = {
+ .pmu_secure = true,
+ .pmu_cpuhp = true,
+ .rd_table = &gs101_pmu_rd_table,
+ .wr_table = &gs101_pmu_wr_table,
+};
+
+/*
+ * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
+ * from EL3, but are still read accessible. As Linux needs to write some of
+ * these registers, the following functions are provided and exposed via
+ * regmap.
+ *
+ * Note: This SMC interface is known to be implemented on gs101 and derivative
+ * SoCs.
+ */
+
+/* Write to a protected PMU register. */
+int tensor_sec_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);
+
+ /* Returns 0 on success, or -EINVAL if the access isn't allowed. */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/* Read/Modify/Write a protected PMU register. */
+static int tensor_sec_reg_rmw(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);
+
+ /* Returns 0 on success, or -EINVAL if the access isn't allowed. */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/*
+ * Read a protected PMU register. All PMU registers can be read by Linux.
+ * Note: The SMC read register is not used, as only registers that can be
+ * written are readable via SMC.
+ */
+int tensor_sec_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ *val = pmu_raw_readl(reg);
+ return 0;
+}
+
+/*
+ * For SoCs that have set/clear bit hardware, this function can be used when
+ * the PMU register will be accessed by multiple masters.
+ *
+ * For example, to set bits 13:8 in PMU reg offset 0x3e80
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
+ *
+ * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
+ */
+static int tensor_set_bits_atomic(void *context, unsigned int offset, u32 val,
+ u32 mask)
+{
+ int ret;
+ unsigned int i;
+
+ for (i = 0; i < 32; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ offset &= ~TENSOR_SET_BITS;
+
+ if (val & BIT(i))
+ offset |= TENSOR_SET_BITS;
+ else
+ offset |= TENSOR_CLR_BITS;
+
+ ret = tensor_sec_reg_write(context, offset, i);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static bool tensor_is_atomic(unsigned int reg)
+{
+ /*
+ * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
+ * as the target registers can be accessed by multiple masters. SFRs
+ * that don't support atomic are added to the switch statement below.
+ */
+ if (reg > PMUALIVE_MASK)
+ return false;
+
+ switch (reg) {
+ case GS101_SYSIP_DAT(0):
+ case GS101_SYSTEM_CONFIGURATION:
+ return false;
+ default:
+ return true;
+ }
+}
+
+int tensor_sec_update_bits(void *context, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ if (!tensor_is_atomic(reg))
+ return tensor_sec_reg_rmw(context, reg, mask, val);
+
+ return tensor_set_bits_atomic(context, reg, val, mask);
+}
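
Taken together, gs101_pmu_rd_table allows reads of every listed register, while gs101_pmu_wr_table layers the read-only ranges on top as no_ranges, so writes to those offsets never reach the SMC path. With the tables plugged into the secure regmap, out-of-policy accesses fail at the regmap layer; a sketch:

	/* Allowed: GS101_INFORM0 is in gs101_pmu_registers. */
	regmap_write(pmu_regmap, GS101_INFORM0, 0x1);

	/* Rejected with -EIO: GS101_VERSION is in the read-only ranges. */
	regmap_write(pmu_regmap, GS101_VERSION, 0x1);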
diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c
index c1bdea8c853f..ab75d50cc85c 100644
--- a/drivers/soc/tegra/cbb/tegra194-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra194-cbb.c
@@ -1836,7 +1836,7 @@ print_errlog1_2(struct seq_file *file, struct tegra194_cbb *cbb,
}
/*
- * Print transcation type, error code and description from ErrLog0 for all
+ * Print transaction type, error code and description from ErrLog0 for all
* errors. For NOC target errors, all relevant error info is printed using
* ErrLog0 only. But additional information is printed for errors from
* APB targets because for them:
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index d27667283846..74d2fedea71c 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -182,8 +182,6 @@ static int tegra_fuse_probe(struct platform_device *pdev)
}
fuse->soc->init(fuse);
- tegra_fuse_print_sku_info(&tegra_sku_info);
- tegra_soc_device_register();
err = tegra_fuse_add_lookups(fuse);
if (err)
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 695d0b7f9a8a..06c2bcbee573 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -65,27 +65,52 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
sku_info->gpu_speedo_id = 0;
*threshold = THRESHOLD_INDEX_0;
- switch (sku) {
- case 0x00: /* Engineering SKU */
- case 0x01: /* Engineering SKU */
- case 0x07:
- case 0x17:
- case 0x27:
- if (speedo_rev >= 2)
+ if (sku_info->revision >= TEGRA_REVISION_A02) {
+ switch (sku) {
+ case 0x00: /* Engineering SKU */
+ case 0x01: /* Engineering SKU */
+ case 0x13:
+ sku_info->cpu_speedo_id = 5;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ case 0x07:
+ case 0x17:
+ case 0x1F:
+ sku_info->cpu_speedo_id = 7;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ case 0x27:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ case 0x83:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->gpu_speedo_id = 3;
+ break;
+
+ case 0x87:
+ sku_info->cpu_speedo_id = 2;
sku_info->gpu_speedo_id = 1;
- break;
-
- case 0x13:
- if (speedo_rev >= 2)
- sku_info->gpu_speedo_id = 1;
-
- sku_info->cpu_speedo_id = 1;
- break;
-
- default:
+ break;
+
+ case 0x8F:
+ sku_info->soc_speedo_id = 2;
+ sku_info->cpu_speedo_id = 9;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ default:
+ pr_err("Tegra210: unknown revision 2 or newer SKU %#04x\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+ } else if (sku == 0x00 || sku == 0x01 || sku == 0x07 || sku == 0x13 || sku == 0x17) {
+ sku_info->gpu_speedo_id = 1;
+ } else {
pr_err("Tegra210: unknown SKU %#04x\n", sku);
- /* Using the default for the error case */
- break;
}
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 034a2a535a1e..f3760a3b3026 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -423,6 +423,7 @@ struct tegra_pmc_soc {
* @wake_sw_status_map: Bitmap to hold raw status of wakes without mask
* @wake_cntrl_level_map: Bitmap to hold wake levels to be programmed in
* cntrl register associated with each wake during system suspend.
+ * @syscore: syscore suspend/resume callbacks
*/
struct tegra_pmc {
struct device *dev;
@@ -466,7 +467,7 @@ struct tegra_pmc {
unsigned long *wake_type_dual_edge_map;
unsigned long *wake_sw_status_map;
unsigned long *wake_cntrl_level_map;
- struct syscore_ops syscore;
+ struct syscore syscore;
};
static struct tegra_pmc *pmc = &(struct tegra_pmc) {
@@ -2897,9 +2898,16 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (IS_ERR(pmc->wake))
return PTR_ERR(pmc->wake);
- pmc->aotag = devm_platform_ioremap_resource_byname(pdev, "aotag");
- if (IS_ERR(pmc->aotag))
- return PTR_ERR(pmc->aotag);
+ /* "aotag" is an optional aperture */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "aotag");
+ if (res) {
+ pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->aotag))
+ return PTR_ERR(pmc->aotag);
+ } else {
+ pmc->aotag = NULL;
+ }
/* "scratch" is an optional aperture */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -3147,7 +3155,7 @@ static void tegra186_pmc_process_wake_events(struct tegra_pmc *pmc, unsigned int
}
}
-static void tegra186_pmc_wake_syscore_resume(void)
+static void tegra186_pmc_wake_syscore_resume(void *data)
{
u32 status, mask;
unsigned int i;
@@ -3160,7 +3168,7 @@ static void tegra186_pmc_wake_syscore_resume(void)
}
}
-static int tegra186_pmc_wake_syscore_suspend(void)
+static int tegra186_pmc_wake_syscore_suspend(void *data)
{
wke_read_sw_wake_status(pmc);
@@ -3179,6 +3187,11 @@ static int tegra186_pmc_wake_syscore_suspend(void)
return 0;
}
+static const struct syscore_ops tegra186_pmc_wake_syscore_ops = {
+ .suspend = tegra186_pmc_wake_syscore_suspend,
+ .resume = tegra186_pmc_wake_syscore_resume,
+};
+
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
static int tegra_pmc_suspend(struct device *dev)
{
@@ -3829,10 +3842,8 @@ static const struct tegra_pmc_regs tegra186_pmc_regs = {
static void tegra186_pmc_init(struct tegra_pmc *pmc)
{
- pmc->syscore.suspend = tegra186_pmc_wake_syscore_suspend;
- pmc->syscore.resume = tegra186_pmc_wake_syscore_resume;
-
- register_syscore_ops(&pmc->syscore);
+ pmc->syscore.ops = &tegra186_pmc_wake_syscore_ops;
+ register_syscore(&pmc->syscore);
}
static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -4214,6 +4225,13 @@ static const struct tegra_wake_event tegra234_wake_events[] = {
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)),
TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
+ TEGRA_WAKE_IRQ("usb3-port-0", 76, 167),
+ TEGRA_WAKE_IRQ("usb3-port-1", 77, 167),
+ TEGRA_WAKE_IRQ("usb3-port-2-3", 78, 167),
+ TEGRA_WAKE_IRQ("usb2-port-0", 79, 167),
+ TEGRA_WAKE_IRQ("usb2-port-1", 80, 167),
+ TEGRA_WAKE_IRQ("usb2-port-2", 81, 167),
+ TEGRA_WAKE_IRQ("usb2-port-3", 82, 167),
TEGRA_WAKE_IRQ("sw-wake", SW_WAKE_ID, 179),
};
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index a572d15f6161..6fdf4d14b7e7 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -77,17 +77,17 @@ struct registered_event_data {
static bool xlnx_is_error_event(const u32 node_id)
{
- u32 pm_family_code, pm_sub_family_code;
+ u32 pm_family_code;
- zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ zynqmp_pm_get_family_info(&pm_family_code);
- if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) {
+ if (pm_family_code == PM_VERSAL_FAMILY_CODE) {
if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 ||
node_id == VERSAL_EVENT_ERROR_PMC_ERR2 ||
node_id == VERSAL_EVENT_ERROR_PSM_ERR1 ||
node_id == VERSAL_EVENT_ERROR_PSM_ERR2)
return true;
- } else {
+ } else if (pm_family_code == PM_VERSAL_NET_FAMILY_CODE) {
if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 ||
node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 ||
node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 ||
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index ae59bf16659a..9b7b2858b22a 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -285,7 +285,7 @@ static int register_event(struct device *dev, const enum pm_api_cb_id cb_type, c
static int zynqmp_pm_probe(struct platform_device *pdev)
{
int ret, irq;
- u32 pm_api_version, pm_family_code, pm_sub_family_code, node_id;
+ u32 pm_api_version, pm_family_code, node_id;
struct mbox_client *client;
ret = zynqmp_pm_get_api_version(&pm_api_version);
@@ -315,14 +315,16 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
zynqmp_pm_init_suspend_work_fn);
- ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
if (ret < 0)
return ret;
- if (pm_sub_family_code == VERSALNET_SUB_FAMILY_CODE)
+ if (pm_family_code == PM_VERSAL_NET_FAMILY_CODE)
node_id = PM_DEV_ACPU_0_0;
- else
+ else if (pm_family_code == PM_VERSAL_FAMILY_CODE)
node_id = PM_DEV_ACPU_0;
+ else
+ return -ENODEV;
ret = register_event(&pdev->dev, PM_NOTIFY_CB, node_id, EVENT_SUBSYSTEM_RESTART,
false, subsystem_restart_event_callback);
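Both Xilinx hunks track the same API change: zynqmp_pm_get_family_info() lost its sub-family output parameter, so callers now dispatch on a single family code and must reject unknown families explicitly instead of falling through an else branch. A short sketch of the new calling convention, mirroring the zynqmp_power.c hunk:

	static int select_cpu_node(u32 *node_id)
	{
		u32 family;
		int ret;

		ret = zynqmp_pm_get_family_info(&family);
		if (ret < 0)
			return ret;

		switch (family) {
		case PM_VERSAL_FAMILY_CODE:
			*node_id = PM_DEV_ACPU_0;
			break;
		case PM_VERSAL_NET_FAMILY_CODE:
			*node_id = PM_DEV_ACPU_0_0;
			break;
		default:
			return -ENODEV;	/* unknown family: fail, don't guess */
		}

		return 0;
	}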
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3b89b5a70331..b8457477cee9 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -730,7 +730,7 @@ static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
agent->orb_pointer);
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -764,7 +764,7 @@ static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent DOORBELL\n");
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -990,7 +990,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
if (tgt_agent_check_active(agent) && !doorbell) {
INIT_WORK(&req->work, tgt_agent_process_work);
- queue_work(system_unbound_wq, &req->work);
+ queue_work(system_dfl_wq, &req->work);
} else {
/* don't process this request, just check next_ORB */
sbp_free_request(req);
@@ -1618,7 +1618,7 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
agent->orb_offset = sbp2_pointer_to_addr(ptr);
agent->request = req;
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
rcode = RCODE_COMPLETE;
} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
addr_to_sbp2_pointer(agent->orb_offset, ptr);
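The sbp_target changes are mechanical: work formerly queued on system_unbound_wq now goes to system_dfl_wq which, assuming the workqueue rework this series builds on, is the new name for the default unbound system workqueue (the WQ_PERCPU flag seen in later hunks is the explicit opt-in for per-CPU queues). Call sites are otherwise unchanged:

	static void agent_fetch_work(struct work_struct *work)
	{
		/* executes on an unbound worker and may migrate between CPUs */
	}

	static DECLARE_WORK(agent_work, agent_fetch_work);

	static void kick_agent(void)
	{
		/* system_dfl_wq: assumed successor of system_unbound_wq */
		queue_work(system_dfl_wq, &agent_work);
	}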
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 9e51c535ba8c..f7868b41c5e6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -578,6 +578,11 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
@@ -1300,6 +1305,11 @@ CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);
CONFIGFS_ATTR(, submit_type);
+CONFIGFS_ATTR_RO(, atomic_max_len);
+CONFIGFS_ATTR_RO(, atomic_alignment);
+CONFIGFS_ATTR_RO(, atomic_granularity);
+CONFIGFS_ATTR_RO(, atomic_max_with_boundary);
+CONFIGFS_ATTR_RO(, atomic_max_boundary);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1343,6 +1353,11 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_pgr_support,
&attr_emulate_rsoc,
&attr_submit_type,
+ &attr_atomic_alignment,
+ &attr_atomic_max_len,
+ &attr_atomic_granularity,
+ &attr_atomic_max_with_boundary,
+ &attr_atomic_max_boundary,
NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -2758,33 +2773,24 @@ static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
- struct se_device *dev;
- struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem;
- ssize_t len = 0, cur_len;
- unsigned char buf[LU_GROUP_NAME_BUF] = { };
+ const char *const end = page + PAGE_SIZE;
+ char *cur = page;
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
- dev = lu_gp_mem->lu_gp_mem_dev;
- hba = dev->se_hba;
+ struct se_device *dev = lu_gp_mem->lu_gp_mem_dev;
+ struct se_hba *hba = dev->se_hba;
- cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+ cur += scnprintf(cur, end - cur, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item));
- cur_len++; /* Extra byte for NULL terminator */
-
- if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) {
- pr_warn("Ran out of lu_gp_show_attr"
- "_members buffer\n");
+ if (WARN_ON_ONCE(cur >= end))
break;
- }
- memcpy(page+len, buf, cur_len);
- len += cur_len;
}
spin_unlock(&lu_gp->lu_gp_lock);
- return len;
+ return cur - page;
}
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
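The rewritten members_show drops the copy-through-stack-buffer scheme in favor of a cursor that writes directly into the configfs page: scnprintf() never writes more than the space it is given and returns the number of bytes actually stored, so advancing cur by its return value keeps the output bounded without per-entry length bookkeeping. The pattern in isolation, as a minimal sketch:

	/* Append one line per name into a PAGE_SIZE buffer, never overflowing. */
	static ssize_t fill_page(char *page, const char *const *names, int n)
	{
		const char *const end = page + PAGE_SIZE;
		char *cur = page;
		int i;

		for (i = 0; i < n; i++) {
			/* returns bytes written, at most end - cur - 1 */
			cur += scnprintf(cur, end - cur, "%s\n", names[i]);
			if (cur >= end)
				break;
		}

		return cur - page;
	}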
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7bb711b24c0d..8ccb8541db1c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -814,6 +814,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
+ /* Skip allocating lun_stats since we can't export them. */
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_shutdown_comp);
@@ -840,12 +841,29 @@ free_device:
return NULL;
}
+void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int block_size = bdev_logical_block_size(bdev);
+
+ if (!bdev_can_atomic_write(bdev))
+ return;
+
+ attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
+ attrib->atomic_alignment = queue_atomic_write_unit_min_bytes(q) / block_size;
+ attrib->atomic_granularity = attrib->atomic_alignment;
+ attrib->atomic_max_with_boundary = 0;
+ attrib->atomic_max_boundary = 0;
+}
+EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
+
/*
* Check if the underlying struct block_device supports discard and if yes
* configure the UNMAP parameters.
*/
-bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct block_device *bdev)
+bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
{
int block_size = bdev_logical_block_size(bdev);
@@ -863,7 +881,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
bdev_discard_alignment(bdev) / block_size;
return true;
}
-EXPORT_SYMBOL(target_configure_unmap_from_queue);
+EXPORT_SYMBOL(target_configure_unmap_from_bdev);
/*
* Convert from blocksize advertised to the initiator to the 512 byte
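target_configure_write_atomic_from_bdev() converts the block layer's byte-granular atomic-write limits into logical-block units for the SCSI attributes and leaves the boundary-based limits unadvertised. A worked example with hypothetical queue limits:

	/*
	 * Assume: bdev_logical_block_size()            = 4096
	 *         queue_atomic_write_max_bytes(q)      = 65536
	 *         queue_atomic_write_unit_min_bytes(q) = 4096
	 *
	 * Then:   atomic_max_len     = 65536 / 4096 = 16 logical blocks
	 *         atomic_alignment   =  4096 / 4096 =  1 logical block
	 *         atomic_granularity = atomic_alignment = 1
	 *         atomic_max_with_boundary = atomic_max_boundary = 0
	 */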
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 7156a4dc1ca7..13159928e365 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -697,7 +697,7 @@ static void target_fabric_port_release(struct config_item *item)
struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group);
- kfree_rcu(lun, rcu_head);
+ call_rcu(&lun->rcu_head, target_tpg_free_lun);
}
static struct configfs_item_operations target_fabric_port_item_ops = {
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 2d78ef74633c..b2610073e8cc 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -92,8 +92,8 @@ static bool fd_configure_unmap(struct se_device *dev)
struct inode *inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode))
- return target_configure_unmap_from_queue(&dev->dev_attrib,
- I_BDEV(inode));
+ return target_configure_unmap_from_bdev(&dev->dev_attrib,
+ I_BDEV(inode));
/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
dev->dev_attrib.max_unmap_lba_count = 0x2000;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 66c292b7d74b..8ec7b534ad76 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -84,8 +84,8 @@ static bool iblock_configure_unmap(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- return target_configure_unmap_from_queue(&dev->dev_attrib,
- ib_dev->ibd_bd);
+ return target_configure_unmap_from_bdev(&dev->dev_attrib,
+ ib_dev->ibd_bd);
}
static int iblock_configure_device(struct se_device *dev)
@@ -152,6 +152,8 @@ static int iblock_configure_device(struct se_device *dev)
if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
+ target_configure_write_atomic_from_bdev(&dev->dev_attrib, bd);
+
bi = bdev_get_integrity(bd);
if (!bi)
return 0;
@@ -773,6 +775,9 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
else if (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA;
}
+
+ if (cmd->se_cmd_flags & SCF_ATOMIC)
+ opf |= REQ_ATOMIC;
} else {
opf = REQ_OP_READ;
miter_dir = SG_MITER_FROM_SG;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 20aab1f50565..763e6d26e187 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -125,6 +125,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
struct se_lun *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
+void target_tpg_free_lun(struct rcu_head *head);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
bool, struct se_device *);
void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index fe8beb7dbab1..abe91dc8722e 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -764,6 +764,49 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
return 0;
}
+static sense_reason_t
+sbc_check_atomic(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+ struct se_dev_attrib *attrib = &dev->dev_attrib;
+ u16 boundary, transfer_len;
+ u64 lba;
+
+ lba = transport_lba_64(cdb);
+ boundary = get_unaligned_be16(&cdb[10]);
+ transfer_len = get_unaligned_be16(&cdb[12]);
+
+ if (!attrib->atomic_max_len)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (boundary) {
+ if (transfer_len > attrib->atomic_max_with_boundary)
+ return TCM_INVALID_CDB_FIELD;
+
+ if (boundary > attrib->atomic_max_boundary)
+ return TCM_INVALID_CDB_FIELD;
+ } else {
+ if (transfer_len > attrib->atomic_max_len)
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (attrib->atomic_granularity) {
+ if (transfer_len % attrib->atomic_granularity)
+ return TCM_INVALID_CDB_FIELD;
+
+ if (boundary && boundary % attrib->atomic_granularity)
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (dev->dev_attrib.atomic_alignment) {
+ u64 _lba = lba;
+
+ if (do_div(_lba, dev->dev_attrib.atomic_alignment))
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ return 0;
+}
+
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
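sbc_check_atomic() reads its operands straight out of the WRITE ATOMIC (16) CDB: the 64-bit LBA at bytes 2-9 (via transport_lba_64()), the ATOMIC BOUNDARY at bytes 10-11, and the TRANSFER LENGTH at bytes 12-13, both big-endian. Decoding a concrete CDB under that layout:

	/*
	 * Byte:  0      1     2..9  10..11    12..13    14     15
	 *      opcode  flags  LBA   boundary  xfer len  group  control
	 */
	u8 cdb[16] = { WRITE_ATOMIC_16, 0,
		       0, 0, 0, 0, 0, 0, 0x10, 0x00,	/* LBA = 0x1000 */
		       0x00, 0x00,			/* boundary = 0 */
		       0x00, 0x08,			/* transfer length = 8 */
		       0, 0 };

	u64 lba     = get_unaligned_be64(&cdb[2]);	/* 0x1000 */
	u16 bound   = get_unaligned_be16(&cdb[10]);	/* 0: no boundary checks */
	u16 nblocks = get_unaligned_be16(&cdb[12]);	/* 8 logical blocks */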
@@ -861,6 +904,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
break;
case WRITE_16:
case WRITE_VERIFY_16:
+ case WRITE_ATOMIC_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
@@ -872,6 +916,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ if (cdb[0] == WRITE_ATOMIC_16) {
+ cmd->se_cmd_flags |= SCF_ATOMIC;
+
+ ret = sbc_check_atomic(dev, cmd, cdb);
+ if (ret)
+ return ret;
+ }
cmd->execute_cmd = sbc_execute_rw;
break;
case VARIABLE_LENGTH_CMD:
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index aad0096afa21..fe2b888bcb43 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -521,7 +521,6 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
- buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
@@ -562,11 +561,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
else
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
- /*
- * Exit now if we don't support TP.
- */
+ put_unaligned_be16(12, &buf[2]);
+
if (!have_tp)
- goto max_write_same;
+ goto try_atomic;
/*
* Set MAXIMUM UNMAP LBA COUNT
@@ -595,9 +593,29 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* MAXIMUM WRITE SAME LENGTH
*/
-max_write_same:
put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+ put_unaligned_be16(40, &buf[2]);
+
+try_atomic:
+ /*
+ * ATOMIC
+ */
+ if (!dev->dev_attrib.atomic_max_len)
+ goto done;
+
+ if (dev->dev_attrib.atomic_max_len < io_max_blocks)
+ put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]);
+ else
+ put_unaligned_be32(io_max_blocks, &buf[44]);
+
+ put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]);
+ put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]);
+ put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]);
+ put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]);
+
+ put_unaligned_be16(60, &buf[2]);
+done:
return 0;
}
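spc_emulate_evpd_b0() now grows the Block Limits PAGE LENGTH at bytes 2-3 in step with the fields it emits: 12 after the basic transfer limits (payload bytes 4-15), 40 once the thin-provisioning and WRITE SAME fields are filled (through byte 43), and 60 when the atomic fields through byte 63 are present, since PAGE LENGTH excludes the 4-byte header. The atomic field offsets, with their SBC-4 names for reference:

	/*
	 * Block Limits VPD (B0h) atomic-write fields, one be32 each:
	 *   buf[44]  MAXIMUM ATOMIC TRANSFER LENGTH (capped at io_max_blocks)
	 *   buf[48]  ATOMIC ALIGNMENT
	 *   buf[52]  ATOMIC TRANSFER LENGTH GRANULARITY
	 *   buf[56]  MAXIMUM ATOMIC TRANSFER LENGTH WITH ATOMIC BOUNDARY
	 *   buf[60]  MAXIMUM ATOMIC BOUNDARY SIZE
	 * PAGE LENGTH = 60 covers payload bytes 4..63.
	 */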
@@ -1452,6 +1470,24 @@ static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
+static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ return cmd->se_dev->dev_attrib.atomic_max_len;
+}
+
+static struct target_opcode_descriptor tcm_opcode_write_atomic16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_ATOMIC_16,
+ .cdb_size = 16,
+ .usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_atomic_enabled,
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
@@ -2008,6 +2044,7 @@ static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_write16,
&tcm_opcode_write_verify16,
&tcm_opcode_write_same32,
+ &tcm_opcode_write_atomic16,
&tcm_opcode_compare_write,
&tcm_opcode_read_capacity,
&tcm_opcode_read_capacity16,
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 6bdf2d8bd694..083205052be2 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -276,56 +276,39 @@ static ssize_t target_stat_lu_state_bit_show(struct config_item *item,
return snprintf(page, PAGE_SIZE, "exposed\n");
}
-static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
- char *page)
-{
- struct se_device *dev = to_stat_lu_dev(item);
- struct se_dev_io_stats *stats;
- unsigned int cpu;
- u32 cmds = 0;
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(dev->stats, cpu);
- cmds += stats->total_cmds;
- }
-
- /* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%u\n", cmds);
-}
-
-static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_device *dev = to_stat_lu_dev(item);
- struct se_dev_io_stats *stats;
- unsigned int cpu;
- u32 bytes = 0;
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(dev->stats, cpu);
- bytes += stats->read_bytes;
- }
-
- /* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
-}
-
-static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_device *dev = to_stat_lu_dev(item);
- struct se_dev_io_stats *stats;
- unsigned int cpu;
- u32 bytes = 0;
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(dev->stats, cpu);
- bytes += stats->write_bytes;
- }
-
- /* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
-}
+#define per_cpu_stat_snprintf(stats_struct, prefix, field, shift) \
+static ssize_t \
+per_cpu_stat_##prefix##_snprintf(struct stats_struct __percpu *per_cpu_stats, \
+ char *page) \
+{ \
+ struct stats_struct *stats; \
+ unsigned int cpu; \
+ u64 sum = 0; \
+ \
+ for_each_possible_cpu(cpu) { \
+ stats = per_cpu_ptr(per_cpu_stats, cpu); \
+ sum += stats->field; \
+ } \
+ \
+ return snprintf(page, PAGE_SIZE, "%llu\n", sum >> shift); \
+}
+
+#define lu_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(se_dev_io_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_device *dev = to_stat_lu_dev(item); \
+ \
+ return per_cpu_stat_##prefix##_snprintf(dev->stats, page); \
+}
+
+/* scsiLuNumCommands */
+lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0);
+/* scsiLuReadMegaBytes */
+lu_show_per_cpu_stat(lu_read_mbytes, read_bytes, 20);
+/* scsiLuWrittenMegaBytes */
+lu_show_per_cpu_stat(lu_write_mbytes, write_bytes, 20);
static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
{
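Each lu_show_per_cpu_stat() invocation generates a summing helper plus the configfs show wrapper. lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0), for instance, expands to approximately:

	static ssize_t
	per_cpu_stat_lu_num_cmds_snprintf(struct se_dev_io_stats __percpu *per_cpu_stats,
					  char *page)
	{
		struct se_dev_io_stats *stats;
		unsigned int cpu;
		u64 sum = 0;

		for_each_possible_cpu(cpu) {
			stats = per_cpu_ptr(per_cpu_stats, cpu);
			sum += stats->total_cmds;
		}

		return snprintf(page, PAGE_SIZE, "%llu\n", sum >> 0);
	}

	static ssize_t
	target_stat_lu_num_cmds_show(struct config_item *item, char *page)
	{
		struct se_device *dev = to_stat_lu_dev(item);

		return per_cpu_stat_lu_num_cmds_snprintf(dev->stats, page);
	}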
@@ -623,53 +606,30 @@ static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item,
return ret;
}
-static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item,
- char *page)
-{
- struct se_lun *lun = to_stat_tgt_port(item);
- struct se_device *dev;
- ssize_t ret = -ENODEV;
-
- rcu_read_lock();
- dev = rcu_dereference(lun->lun_se_dev);
- if (dev)
- ret = snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&lun->lun_stats.cmd_pdus));
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun *lun = to_stat_tgt_port(item);
- struct se_device *dev;
- ssize_t ret = -ENODEV;
-
- rcu_read_lock();
- dev = rcu_dereference(lun->lun_se_dev);
- if (dev)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun *lun = to_stat_tgt_port(item);
- struct se_device *dev;
- ssize_t ret = -ENODEV;
-
- rcu_read_lock();
- dev = rcu_dereference(lun->lun_se_dev);
- if (dev)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
- rcu_read_unlock();
- return ret;
-}
+#define tgt_port_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(scsi_port_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_lun *lun = to_stat_tgt_port(item); \
+ struct se_device *dev; \
+ int ret; \
+ \
+ rcu_read_lock(); \
+ dev = rcu_dereference(lun->lun_se_dev); \
+ if (!dev) { \
+ rcu_read_unlock(); \
+ return -ENODEV; \
+ } \
+ \
+ ret = per_cpu_stat_##prefix##_snprintf(lun->lun_stats, page); \
+ rcu_read_unlock(); \
+ return ret; \
+}
+
+tgt_port_show_per_cpu_stat(tgt_port_in_cmds, cmd_pdus, 0);
+tgt_port_show_per_cpu_stat(tgt_port_write_mbytes, rx_data_octets, 20);
+tgt_port_show_per_cpu_stat(tgt_port_read_mbytes, tx_data_octets, 20);
static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item,
char *page)
@@ -1035,92 +995,34 @@ static ssize_t target_stat_auth_att_count_show(struct config_item *item,
return ret;
}
-static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
- char *page)
-{
- struct se_lun_acl *lacl = auth_to_lacl(item);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry_io_stats *stats;
- struct se_dev_entry *deve;
- unsigned int cpu;
- ssize_t ret;
- u32 cmds = 0;
-
- rcu_read_lock();
- deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
- if (!deve) {
- rcu_read_unlock();
- return -ENODEV;
- }
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(deve->stats, cpu);
- cmds += stats->total_cmds;
- }
-
- /* scsiAuthIntrOutCommands */
- ret = snprintf(page, PAGE_SIZE, "%u\n", cmds);
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun_acl *lacl = auth_to_lacl(item);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry_io_stats *stats;
- struct se_dev_entry *deve;
- unsigned int cpu;
- ssize_t ret;
- u32 bytes = 0;
-
- rcu_read_lock();
- deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
- if (!deve) {
- rcu_read_unlock();
- return -ENODEV;
- }
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(deve->stats, cpu);
- bytes += stats->read_bytes;
- }
-
- /* scsiAuthIntrReadMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun_acl *lacl = auth_to_lacl(item);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry_io_stats *stats;
- struct se_dev_entry *deve;
- unsigned int cpu;
- ssize_t ret;
- u32 bytes = 0;
-
- rcu_read_lock();
- deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
- if (!deve) {
- rcu_read_unlock();
- return -ENODEV;
- }
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(deve->stats, cpu);
- bytes += stats->write_bytes;
- }
-
- /* scsiAuthIntrWrittenMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
- rcu_read_unlock();
- return ret;
-}
+#define auth_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(se_dev_entry_io_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_lun_acl *lacl = auth_to_lacl(item); \
+ struct se_node_acl *nacl = lacl->se_lun_nacl; \
+ struct se_dev_entry *deve; \
+ int ret; \
+ \
+ rcu_read_lock(); \
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun); \
+ if (!deve) { \
+ rcu_read_unlock(); \
+ return -ENODEV; \
+ } \
+ \
+ ret = per_cpu_stat_##prefix##_snprintf(deve->stats, page); \
+ rcu_read_unlock(); \
+ return ret; \
+}
+
+/* scsiAuthIntrOutCommands */
+auth_show_per_cpu_stat(auth_num_cmds, total_cmds, 0);
+/* scsiAuthIntrReadMegaBytes */
+auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20);
+/* scsiAuthIntrWrittenMegaBytes */
+auth_show_per_cpu_stat(auth_write_mbytes, write_bytes, 20);
static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item,
char *page)
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c0e429e5ef31..8b5ad50baa43 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -548,7 +548,7 @@ int core_tpg_register(
ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
true, g_lun0_dev);
if (ret < 0) {
- kfree(se_tpg->tpg_virt_lun0);
+ target_tpg_free_lun(&se_tpg->tpg_virt_lun0->rcu_head);
return ret;
}
}
@@ -595,7 +595,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
if (se_tpg->proto_id >= 0) {
core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
- kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
+ call_rcu(&se_tpg->tpg_virt_lun0->rcu_head, target_tpg_free_lun);
}
target_tpg_deregister_rtpi(se_tpg);
@@ -615,6 +615,13 @@ struct se_lun *core_tpg_alloc_lun(
pr_err("Unable to allocate se_lun memory\n");
return ERR_PTR(-ENOMEM);
}
+
+ lun->lun_stats = alloc_percpu(struct scsi_port_stats);
+ if (!lun->lun_stats) {
+ pr_err("Unable to allocate se_lun stats memory\n");
+ goto free_lun;
+ }
+
lun->unpacked_lun = unpacked_lun;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
@@ -628,6 +635,18 @@ struct se_lun *core_tpg_alloc_lun(
lun->lun_tpg = tpg;
return lun;
+
+free_lun:
+ kfree(lun);
+ return ERR_PTR(-ENOMEM);
+}
+
+void target_tpg_free_lun(struct rcu_head *head)
+{
+ struct se_lun *lun = container_of(head, struct se_lun, rcu_head);
+
+ free_percpu(lun->lun_stats);
+ kfree(lun);
}
int core_tpg_add_lun(
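kfree_rcu() can only free the object containing the rcu_head, so once an se_lun owns a separate percpu lun_stats allocation the code has to switch to call_rcu() with a callback that recovers the lun via container_of() and releases both allocations after the grace period. The pattern reduced to its essentials:

	void target_tpg_free_lun(struct rcu_head *head)
	{
		struct se_lun *lun = container_of(head, struct se_lun, rcu_head);

		free_percpu(lun->lun_stats);	/* kfree_rcu() could not do this */
		kfree(lun);
	}

	/* after the lun is unreachable by new readers: */
	call_rcu(&lun->rcu_head, target_tpg_free_lun);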
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0a76bdfe5528..e8b7955d40f2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -126,12 +126,12 @@ int init_se_kmem_caches(void)
}
target_completion_wq = alloc_workqueue("target_completion",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_completion_wq)
goto out_free_lba_map_mem_cache;
target_submission_wq = alloc_workqueue("target_submission",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_submission_wq)
goto out_free_completion_wq;
@@ -1571,7 +1571,12 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
return ret;
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
+ /*
+ * If this is the xcopy_lun then we won't have lun_stats since we
+ * can't export them.
+ */
+ if (cmd->se_lun->lun_stats)
+ this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus);
return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);
@@ -2597,8 +2602,9 @@ queue_rsp:
!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
- atomic_long_add(cmd->data_length,
- &cmd->se_lun->lun_stats.tx_data_octets);
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+ cmd->data_length);
/*
* Perform READ_STRIP of PI using software emulation when
* backend had PI enabled, if the transport will not be
@@ -2621,14 +2627,16 @@ queue_rsp:
goto queue_full;
break;
case DMA_TO_DEVICE:
- atomic_long_add(cmd->data_length,
- &cmd->se_lun->lun_stats.rx_data_octets);
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets,
+ cmd->data_length);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
if (cmd->se_cmd_flags & SCF_BIDI) {
- atomic_long_add(cmd->data_length,
- &cmd->se_lun->lun_stats.tx_data_octets);
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+ cmd->data_length);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret)
goto queue_full;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 877ce58c0a70..93534a6e14b7 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -462,7 +462,7 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
int target_xcopy_setup_pt(void)
{
- xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+ xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n");
return -ENOMEM;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 639fc358ed0f..f686d95d3273 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -250,7 +250,7 @@ static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
- wq = alloc_workqueue("tcm_fc", 0, 1);
+ wq = alloc_workqueue("tcm_fc", WQ_PERCPU, 1);
if (!wq) {
kfree(tpg);
return NULL;
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index bd2fca7dc017..8a2f441cd2ec 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -592,7 +592,7 @@ static void hfi_disable_instance(void *ptr)
hfi_disable();
}
-static void hfi_syscore_resume(void)
+static void hfi_syscore_resume(void *data)
{
/* This code runs only on the boot CPU. */
struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
@@ -603,7 +603,7 @@ static void hfi_syscore_resume(void)
hfi_enable_instance(hfi_instance);
}
-static int hfi_syscore_suspend(void)
+static int hfi_syscore_suspend(void *data)
{
/* No locking needed. There is no concurrency with CPU offline. */
hfi_disable();
@@ -611,11 +611,15 @@ static int hfi_syscore_suspend(void)
return 0;
}
-static struct syscore_ops hfi_pm_ops = {
+static const struct syscore_ops hfi_pm_ops = {
.resume = hfi_syscore_resume,
.suspend = hfi_syscore_suspend,
};
+static struct syscore hfi_pm = {
+ .ops = &hfi_pm_ops,
+};
+
static int hfi_thermal_notify(struct notifier_block *nb, unsigned long state,
void *_notify)
{
@@ -710,7 +714,7 @@ void __init intel_hfi_init(void)
if (thermal_genl_register_notifier(&hfi_thermal_nb))
goto err_nl_notif;
- register_syscore_ops(&hfi_pm_ops);
+ register_syscore(&hfi_pm);
return;
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index cf820fa09a04..51e1867e524e 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
+ufshcd-core-$(CONFIG_RPMB) += ufs-rpmb.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index c9bdd4140fd0..9ab91b4c05b0 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -134,17 +134,15 @@ unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
/**
- * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * ufshcd_get_hba_mac - Maximum number of commands supported by the host
+ * controller.
* @hba: per adapter instance
*
- * Return: queue-depth on success, non-zero on error
+ * Return: the MAC value on success; a negative error code on failure.
*
- * MAC - Max. Active Command of the Host Controller (HC)
- * HC wouldn't send more than this commands to the device.
- * Calculates and adjusts the queue depth based on the depth
- * supported by the HC and ufs device.
+ * MAC = Maximum number of Active Commands supported by the Host Controller.
*/
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+int ufshcd_get_hba_mac(struct ufs_hba *hba)
{
int mac;
@@ -162,18 +160,7 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
mac = hba->vops->get_hba_mac(hba);
}
if (mac < 0)
- goto err;
-
- WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
- /*
- * max. value of bqueuedepth = 256, mac is host dependent.
- * It is mandatory for UFS device to define bQueueDepth if
- * shared queuing architecture is enabled.
- */
- return min_t(int, mac, hba->dev_info.bqueuedepth);
-
-err:
- dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
return mac;
}
@@ -307,9 +294,10 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
- int tag = ufshcd_mcq_get_tag(hba, cqe);
if (cqe->command_desc_base_addr) {
+ int tag = ufshcd_mcq_get_tag(hba, cqe);
+
ufshcd_compl_one_cqe(hba, tag, cqe);
/* After processed the cqe, mark it empty (invalid) entry */
cqe->command_desc_base_addr = 0;
@@ -491,9 +479,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
mutex_init(&hwq->sq_mutex);
}
- /* The very first HW queue serves device commands */
- hba->dev_cmd_queue = &hba->uhq[0];
-
host->host_tagset = 1;
return 0;
}
@@ -546,8 +531,9 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
*/
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct ufs_hw_queue *hwq;
void __iomem *reg, *opr_sqd_base;
u32 nexus, id, val;
@@ -556,24 +542,21 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
- if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
- if (!cmd)
- return -EINVAL;
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- if (!hwq)
- return 0;
- } else {
- hwq = hba->dev_cmd_queue;
- }
+ if (!cmd)
+ return -EINVAL;
+
+ hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+ if (!hwq)
+ return 0;
id = hwq->id;
- mutex_lock(&hwq->sq_mutex);
+ guard(mutex)(&hwq->sq_mutex);
/* stop the SQ fetching before working on it */
err = ufshcd_mcq_sq_stop(hba, hwq);
if (err)
- goto unlock;
+ return err;
/* SQCTI = EXT_IID, IID, LUN, Task Tag */
nexus = lrbp->lun << 8 | task_tag;
@@ -600,8 +583,6 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (ufshcd_mcq_sq_start(hba, hwq))
err = -ETIMEDOUT;
-unlock:
- mutex_unlock(&hwq->sq_mutex);
return err;
}
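guard(mutex)() from <linux/cleanup.h> binds the lock to the enclosing scope, which is why the unlock label and its goto disappear from ufshcd_mcq_sq_cleanup(): every return now drops sq_mutex automatically. A minimal sketch with hypothetical helpers stop_queue()/restart_queue():

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(sq_mutex);

	static int stop_and_restart(void)
	{
		guard(mutex)(&sq_mutex);	/* released on every return path */

		if (stop_queue() < 0)		/* hypothetical helper */
			return -ETIMEDOUT;	/* no goto/unlock needed */

		restart_queue();		/* hypothetical helper */
		return 0;
	}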
@@ -632,7 +613,8 @@ static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
struct ufs_hw_queue *hwq, int task_tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_transfer_req_desc *utrd;
__le64 cmd_desc_base_addr;
bool ret = false;
@@ -683,7 +665,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct ufs_hw_queue *hwq;
int err;
diff --git a/drivers/ufs/core/ufs-rpmb.c b/drivers/ufs/core/ufs-rpmb.c
new file mode 100644
index 000000000000..ffad049872b9
--- /dev/null
+++ b/drivers/ufs/core/ufs-rpmb.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UFS OP-TEE based RPMB Driver
+ *
+ * Copyright (C) 2025 Micron Technology, Inc.
+ * Copyright (C) 2025 Qualcomm Technologies, Inc.
+ *
+ * Authors:
+ * Bean Huo <beanhuo@micron.com>
+ * Can Guo <can.guo@oss.qualcomm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/rpmb.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <ufs/ufshcd.h>
+#include <linux/unaligned.h>
+#include "ufshcd-priv.h"
+
+#define UFS_RPMB_SEC_PROTOCOL 0xEC /* JEDEC UFS application */
+#define UFS_RPMB_SEC_PROTOCOL_ID 0x01 /* JEDEC UFS RPMB protocol ID, CDB byte3 */
+
+static const struct bus_type ufs_rpmb_bus_type = {
+ .name = "ufs_rpmb",
+};
+
+/* UFS RPMB device structure */
+struct ufs_rpmb_dev {
+ u8 region_id;
+ struct device dev;
+ struct rpmb_dev *rdev;
+ struct ufs_hba *hba;
+ struct list_head node;
+};
+
+static int ufs_sec_submit(struct ufs_hba *hba, u16 spsp, void *buffer, size_t len, bool send)
+{
+ struct scsi_device *sdev = hba->ufs_rpmb_wlun;
+ u8 cdb[12] = { };
+
+ cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
+ cdb[1] = UFS_RPMB_SEC_PROTOCOL;
+ put_unaligned_be16(spsp, &cdb[2]);
+ put_unaligned_be32(len, &cdb[6]);
+
+ return scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ buffer, len, /*timeout=*/30 * HZ, 0, NULL);
+}
+
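+/*
+ * ufs_sec_submit() hand-builds a 12-byte SECURITY PROTOCOL IN/OUT CDB:
+ * byte 0 selects the direction, byte 1 carries the JEDEC UFS protocol
+ * (0xec), bytes 2-3 hold SECURITY PROTOCOL SPECIFIC (region ID in the high
+ * byte, RPMB protocol ID 0x01 in the low byte, as composed in the
+ * route-frames path below), and bytes 6-9 the big-endian transfer length.
+ * For RPMB region 2 and one 512-byte frame, the OUT CDB comes out as:
+ *
+ *	u8 cdb[12] = {
+ *		SECURITY_PROTOCOL_OUT,	// 0xb5; SECURITY_PROTOCOL_IN is 0xa2
+ *		0xec,			// UFS_RPMB_SEC_PROTOCOL
+ *		0x02, 0x01,		// spsp = region 2 << 8 | protocol ID 1
+ *		0x00, 0x00,		// reserved
+ *		0x00, 0x00, 0x02, 0x00,	// transfer length = 512
+ *		0x00, 0x00,		// reserved, CONTROL
+ *	};
+ */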
+/* UFS RPMB route frames implementation */
+static int ufs_rpmb_route_frames(struct device *dev, u8 *req, unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ struct ufs_rpmb_dev *ufs_rpmb = dev_get_drvdata(dev);
+ struct rpmb_frame *frm_out = (struct rpmb_frame *)req;
+ bool need_result_read = true;
+ u16 req_type, protocol_id;
+ struct ufs_hba *hba;
+ int ret;
+
+ if (!ufs_rpmb) {
+ dev_err(dev, "Missing driver data\n");
+ return -ENODEV;
+ }
+
+ hba = ufs_rpmb->hba;
+
+ req_type = be16_to_cpu(frm_out->req_resp);
+
+ switch (req_type) {
+ case RPMB_PROGRAM_KEY:
+ if (req_len != sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ break;
+ case RPMB_GET_WRITE_COUNTER:
+ if (req_len != sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ need_result_read = false;
+ break;
+ case RPMB_WRITE_DATA:
+ if (req_len % sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ break;
+ case RPMB_READ_DATA:
+ if (req_len != sizeof(struct rpmb_frame) || resp_len % sizeof(struct rpmb_frame))
+ return -EINVAL;
+ need_result_read = false;
+ break;
+ default:
+ dev_err(dev, "Unknown request type=0x%04x\n", req_type);
+ return -EINVAL;
+ }
+
+ protocol_id = ufs_rpmb->region_id << 8 | UFS_RPMB_SEC_PROTOCOL_ID;
+
+ ret = ufs_sec_submit(hba, protocol_id, req, req_len, true);
+ if (ret) {
+ dev_err(dev, "Command failed with ret=%d\n", ret);
+ return ret;
+ }
+
+ if (need_result_read) {
+ struct rpmb_frame *frm_resp = (struct rpmb_frame *)resp;
+
+ memset(frm_resp, 0, sizeof(*frm_resp));
+ frm_resp->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ ret = ufs_sec_submit(hba, protocol_id, resp, resp_len, true);
+ if (ret) {
+ dev_err(dev, "Result read request failed with ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!ret) {
+ ret = ufs_sec_submit(hba, protocol_id, resp, resp_len, false);
+ if (ret)
+ dev_err(dev, "Response read failed with ret=%d\n", ret);
+ }
+
+ return ret;
+}
+
+static void ufs_rpmb_device_release(struct device *dev)
+{
+ struct ufs_rpmb_dev *ufs_rpmb = dev_get_drvdata(dev);
+
+ rpmb_dev_unregister(ufs_rpmb->rdev);
+}
+
+/* UFS RPMB device registration */
+int ufs_rpmb_probe(struct ufs_hba *hba)
+{
+ struct ufs_rpmb_dev *ufs_rpmb, *it, *tmp;
+ struct rpmb_dev *rdev;
+ char *cid = NULL;
+ int region;
+ u32 cap;
+ int ret;
+
+ if (!hba->ufs_rpmb_wlun || hba->dev_info.b_advanced_rpmb_en) {
+ dev_info(hba->dev, "Skip OP-TEE RPMB registration\n");
+ return -ENODEV;
+ }
+
+ /* Check if device_id is available */
+ if (!hba->dev_info.device_id) {
+ dev_err(hba->dev, "UFS Device ID not available\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&hba->rpmbs);
+
+ struct rpmb_descr descr = {
+ .type = RPMB_TYPE_UFS,
+ .route_frames = ufs_rpmb_route_frames,
+ .reliable_wr_count = hba->dev_info.rpmb_io_size,
+ };
+
+ for (region = 0; region < ARRAY_SIZE(hba->dev_info.rpmb_region_size); region++) {
+ cap = hba->dev_info.rpmb_region_size[region];
+ if (!cap)
+ continue;
+
+ ufs_rpmb = devm_kzalloc(hba->dev, sizeof(*ufs_rpmb), GFP_KERNEL);
+ if (!ufs_rpmb) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ufs_rpmb->hba = hba;
+ ufs_rpmb->dev.parent = &hba->ufs_rpmb_wlun->sdev_gendev;
+ ufs_rpmb->dev.bus = &ufs_rpmb_bus_type;
+ ufs_rpmb->dev.release = ufs_rpmb_device_release;
+ dev_set_name(&ufs_rpmb->dev, "ufs_rpmb%d", region);
+
+ /* Set driver data BEFORE device_register */
+ dev_set_drvdata(&ufs_rpmb->dev, ufs_rpmb);
+
+ ret = device_register(&ufs_rpmb->dev);
+ if (ret) {
+ dev_err(hba->dev, "Failed to register UFS RPMB device %d\n", region);
+ put_device(&ufs_rpmb->dev);
+ goto err_out;
+ }
+
+ /* Create unique ID by appending region number to device_id */
+ cid = kasprintf(GFP_KERNEL, "%s-R%d", hba->dev_info.device_id, region);
+ if (!cid) {
+ device_unregister(&ufs_rpmb->dev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ descr.dev_id = cid;
+ descr.dev_id_len = strlen(cid);
+ descr.capacity = cap;
+
+ /* Register RPMB device */
+ rdev = rpmb_dev_register(&ufs_rpmb->dev, &descr);
+ if (IS_ERR(rdev)) {
+ dev_err(hba->dev, "Failed to register UFS RPMB device %d\n", region);
+ device_unregister(&ufs_rpmb->dev);
+ ret = PTR_ERR(rdev);
+ goto err_out;
+ }
+
+ kfree(cid);
+ cid = NULL;
+
+ ufs_rpmb->rdev = rdev;
+ ufs_rpmb->region_id = region;
+
+ list_add_tail(&ufs_rpmb->node, &hba->rpmbs);
+
+ dev_info(hba->dev, "UFS RPMB region %d registered (capacity=%u)\n", region, cap);
+ }
+
+ return 0;
+err_out:
+ kfree(cid);
+ list_for_each_entry_safe(it, tmp, &hba->rpmbs, node) {
+ list_del(&it->node);
+ device_unregister(&it->dev);
+ }
+
+ return ret;
+}
+
+/* UFS RPMB remove handler */
+void ufs_rpmb_remove(struct ufs_hba *hba)
+{
+ struct ufs_rpmb_dev *ufs_rpmb, *tmp;
+
+ if (list_empty(&hba->rpmbs))
+ return;
+
+ /* Remove all registered RPMB devices */
+ list_for_each_entry_safe(ufs_rpmb, tmp, &hba->rpmbs, node) {
+ dev_info(hba->dev, "Removing UFS RPMB region %d\n", ufs_rpmb->region_id);
+ /* Remove from list first */
+ list_del(&ufs_rpmb->node);
+ /* Unregister device */
+ device_unregister(&ufs_rpmb->dev);
+ }
+
+ dev_info(hba->dev, "All UFS RPMB devices unregistered\n");
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("OP-TEE UFS RPMB driver");
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 0086816b27cd..b33f8656edb5 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -235,7 +235,7 @@ static int ufshcd_ahit_to_us(u32 ahit)
}
/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufshcd_us_to_ahit(unsigned int timer)
+u32 ufshcd_us_to_ahit(unsigned int timer)
{
unsigned int scale;
@@ -245,6 +245,7 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
+EXPORT_SYMBOL_GPL(ufshcd_us_to_ahit);
static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
{
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 252186124669..58b506eac6dc 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -105,7 +105,7 @@ static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *j
if (dir != DMA_NONE) {
payload = &job->request_payload;
- if (!payload || !payload->payload_len || !payload->sg_cnt)
+ if (!payload->payload_len || !payload->sg_cnt)
return -EINVAL;
sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index 584c2b5c6ad9..309ae51b4906 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -42,7 +42,6 @@
#define UFS_CMD_TRACE_STRINGS \
EM(UFS_CMD_SEND, "send_req") \
EM(UFS_CMD_COMP, "complete_rsp") \
- EM(UFS_DEV_COMP, "dev_complete") \
EM(UFS_QUERY_SEND, "query_send") \
EM(UFS_QUERY_COMP, "query_complete") \
EM(UFS_QUERY_ERR, "query_complete_err") \
diff --git a/drivers/ufs/core/ufs_trace_types.h b/drivers/ufs/core/ufs_trace_types.h
index f2d5ad1d92b9..bf821970f092 100644
--- a/drivers/ufs/core/ufs_trace_types.h
+++ b/drivers/ufs/core/ufs_trace_types.h
@@ -5,7 +5,6 @@
enum ufs_trace_str_t {
UFS_CMD_SEND,
UFS_CMD_COMP,
- UFS_DEV_COMP,
UFS_QUERY_SEND,
UFS_QUERY_COMP,
UFS_QUERY_ERR,
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index 89bb97c14c15..c148a5194378 100644
--- a/drivers/ufs/core/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -38,10 +38,10 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
}
static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)
return hba->vops->fill_crypto_prdt(hba, crypt_ctx,
@@ -51,17 +51,19 @@ static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
}
static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
return;
- if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
+ if (!(scsi_cmd_to_rq(cmd)->crypt_ctx))
return;
/* Zeroize the PRDT because it can contain cryptographic keys. */
memzero_explicit(lrbp->ucd_prdt_ptr,
- ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));
+ ufshcd_sg_entry_size(hba) * scsi_sg_count(cmd));
}
bool ufshcd_crypto_enable(struct ufs_hba *hba);
@@ -82,13 +84,15 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
struct request_desc_header *h) { }
static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
return 0;
}
static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp) { }
+ struct scsi_cmnd *cmd)
+{
+}
static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index d0a2c963a27d..4259f499382f 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -6,6 +6,8 @@
#include <linux/pm_runtime.h>
#include <ufs/ufshcd.h>
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
+
static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
return !hba->shutting_down;
@@ -65,7 +67,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe);
int ufshcd_mcq_init(struct ufs_hba *hba);
void ufshcd_mcq_disable(struct ufs_hba *hba);
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_get_hba_mac(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
@@ -75,14 +77,19 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp);
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
-#define SD_ASCII_STD true
-#define SD_RAW false
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii);
+/**
+ * enum ufs_descr_fmt - UFS string descriptor format
+ * @SD_RAW: Raw UTF-16 format
+ * @SD_ASCII_STD: Convert to null-terminated ASCII string
+ */
+enum ufs_descr_fmt {
+ SD_RAW = 0,
+ SD_ASCII_STD = 1,
+};
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, enum ufs_descr_fmt fmt);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
@@ -361,6 +368,26 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
+/*
+ * Convert a block layer tag into a SCSI command pointer. This helper is
+ * called once per completed command and also from error handling paths.
+ */
+static inline struct scsi_cmnd *ufshcd_tag_to_cmd(struct ufs_hba *hba, u32 tag)
+{
+ /*
+ * Host-wide tags are enabled in MCQ mode only. See also the
+ * host->host_tagset assignment in ufs-mcq.c.
+ */
+ struct blk_mq_tags *tags = hba->host->tag_set.shared_tags ?:
+ hba->host->tag_set.tags[0];
+ struct request *rq = blk_mq_tag_to_rq(tags, tag);
+
+ if (WARN_ON_ONCE(!rq))
+ return NULL;
+
+ return blk_mq_rq_to_pdu(rq);
+}
+
static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
__must_hold(&q->sq_lock)
{
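ufshcd_tag_to_cmd() is what lets the driver retire its hba->lrb[] array: a task tag resolves through the block layer tag map to the request, blk_mq_rq_to_pdu() yields the scsi_cmnd embedded in the request PDU, and the per-command UFS state now lives in the command's private area. The accessors compose as in the reworked call sites above:

	struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
	struct ufshcd_lrb *lrbp;

	if (!cmd)	/* the helper already warned via WARN_ON_ONCE() */
		return -EINVAL;

	lrbp = scsi_cmd_priv(cmd);	/* UFS state stored after the scsi_cmnd */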
@@ -411,4 +438,17 @@ static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
return val / sizeof(struct utp_transfer_req_desc);
}
+#if IS_ENABLED(CONFIG_RPMB)
+int ufs_rpmb_probe(struct ufs_hba *hba);
+void ufs_rpmb_remove(struct ufs_hba *hba);
+#else
+static inline int ufs_rpmb_probe(struct ufs_hba *hba)
+{
+ return 0;
+}
+static inline void ufs_rpmb_remove(struct ufs_hba *hba)
+{
+}
+#endif
+
#endif /* _UFSHCD_PRIV_H_ */
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index d6a060a72461..040a0ceb170a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -28,6 +28,7 @@
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
@@ -403,10 +404,11 @@ static void ufshcd_configure_wb(struct ufs_hba *hba)
ufshcd_wb_toggle_buf_flush(hba, true);
}
-static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrb,
enum ufs_trace_str_t str_t)
{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ struct utp_upiu_req *rq = lrb->ucd_req_ptr;
struct utp_upiu_header *header;
if (!trace_ufshcd_upiu_enabled())
@@ -415,7 +417,7 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
if (str_t == UFS_CMD_SEND)
header = &rq->header;
else
- header = &hba->lrb[tag].ucd_rsp_ptr->header;
+ header = &lrb->ucd_rsp_ptr->header;
trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
@@ -472,7 +474,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
-static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+static void ufshcd_add_command_trace(struct ufs_hba *hba, struct scsi_cmnd *cmd,
enum ufs_trace_str_t str_t)
{
u64 lba = 0;
@@ -480,16 +482,13 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
u32 doorbell = 0;
u32 intr;
u32 hwq_id = 0;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
+ unsigned int tag = rq->tag;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int transfer_len = -1;
- if (!cmd)
- return;
-
/* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ ufshcd_add_cmd_upiu_trace(hba, lrbp, str_t);
if (!trace_ufshcd_command_enabled())
return;
@@ -503,7 +502,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
lba = scsi_get_lba(cmd);
if (opcode == WRITE_10)
- group_id = lrbp->cmd->cmnd[6];
+ group_id = cmd->cmnd[6];
} else if (opcode == UNMAP) {
/*
* The number of Bytes to be unmapped beginning with the lba.
@@ -596,14 +595,13 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba)
ufshcd_vops_dbg_register_dump(hba);
}
-static
-void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
+static void ufshcd_print_tr(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ bool pr_prdt)
{
- const struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
int prdt_length;
- lrbp = &hba->lrb[tag];
-
if (hba->monitor.enabled) {
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", tag,
div_u64(lrbp->issue_time_stamp_local_clock, 1000));
@@ -646,7 +644,8 @@ static bool ufshcd_print_tr_iter(struct request *req, void *priv)
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
- ufshcd_print_tr(hba, req->tag, *(bool *)priv);
+ if (!blk_mq_is_reserved_rq(req))
+ ufshcd_print_tr(hba, blk_mq_rq_to_pdu(req), *(bool *)priv);
return true;
}
@@ -856,7 +855,7 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
struct cq_entry *cqe)
{
if (cqe)
- return le32_to_cpu(cqe->status) & MASK_OCS;
+ return cqe->overall_status & MASK_OCS;
return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
@@ -1076,7 +1075,7 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
* @hba: per adapter instance
* @on: If True, vote for perf PM QoS mode otherwise power save mode
*/
-static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
+void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
guard(mutex)(&hba->pm_qos_mutex);
@@ -1085,6 +1084,7 @@ static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
}
+EXPORT_SYMBOL_GPL(ufshcd_pm_qos_update);
/**
* ufshcd_set_clk_freq - set UFS controller clock frequencies
@@ -1290,13 +1290,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
- const struct scsi_device *sdev;
+ struct scsi_device *sdev;
unsigned long flags;
u32 pending = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
__shost_for_each_device(sdev, hba->host)
- pending += sbitmap_weight(&sdev->budget_map);
+ pending += scsi_device_busy(sdev);
spin_unlock_irqrestore(hba->host->host_lock, flags);
return pending;
@@ -2294,20 +2294,21 @@ static inline int ufshcd_monitor_opcode2dir(u8 opcode)
return -EINVAL;
}
+/* Must only be called for SCSI commands. */
static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
const struct ufs_hba_monitor *m = &hba->monitor;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
- return (m->enabled && lrbp && lrbp->cmd &&
- (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
- ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
+ return m->enabled &&
+ (!m->chunk_size || m->chunk_size == cmd->sdb.length) &&
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp);
}
-static void ufshcd_start_monitor(struct ufs_hba *hba,
- const struct ufshcd_lrb *lrbp)
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ int dir = ufshcd_monitor_opcode2dir(cmd->cmnd[0]);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2316,14 +2317,15 @@ static void ufshcd_start_monitor(struct ufs_hba *hba,
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
-static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ struct request *req = scsi_cmd_to_rq(cmd);
+ const struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ int dir = ufshcd_monitor_opcode2dir(cmd->cmnd[0]);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2348,17 +2350,24 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
+/* Returns %true for SCSI commands and %false for device management commands. */
+static bool ufshcd_is_scsi_cmd(struct scsi_cmnd *cmd)
+{
+ return !blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd));
+}
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
- * @task_tag: Task tag of the command
+ * @cmd: SCSI command or device management command pointer
* @hwq: pointer to hardware queue instance
*/
-static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
- struct ufs_hw_queue *hwq)
+static inline void ufshcd_send_command(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd,
+ struct ufs_hw_queue *hwq)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
unsigned long flags;
if (hba->monitor.enabled) {
@@ -2367,11 +2376,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
lrbp->compl_time_stamp = ktime_set(0, 0);
lrbp->compl_time_stamp_local_clock = 0;
}
- ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
- if (lrbp->cmd)
+ if (ufshcd_is_scsi_cmd(cmd)) {
+ ufshcd_add_command_trace(hba, cmd, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_start_monitor(hba, lrbp);
+ if (unlikely(ufshcd_should_inform_monitor(hba, cmd)))
+ ufshcd_start_monitor(hba, cmd);
+ }
if (hba->mcq_enabled) {
int utrd_size = sizeof(struct utp_transfer_req_desc);
@@ -2386,22 +2396,22 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
} else {
spin_lock_irqsave(&hba->outstanding_lock, flags);
if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, lrbp->task_tag,
- !!lrbp->cmd);
- __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << lrbp->task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ hba->vops->setup_xfer_req(hba, tag,
+ ufshcd_is_scsi_cmd(cmd));
+ __set_bit(tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
}
}
/**
* ufshcd_copy_sense_data - Copy sense data in case of check condition
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*/
-static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+static inline void ufshcd_copy_sense_data(struct scsi_cmnd *cmd)
{
- u8 *const sense_buffer = lrbp->cmd->sense_buffer;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u8 *const sense_buffer = cmd->sense_buffer;
u16 resp_len;
int len;
@@ -2474,7 +2484,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
- hba->reserved_slot = hba->nutrs - 1;
hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
@@ -2618,7 +2627,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
init_completion(&uic_cmd->done);
- uic_cmd->cmd_active = 1;
+ uic_cmd->cmd_active = true;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
return 0;
@@ -2706,13 +2715,13 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
/**
* ufshcd_map_sg - Map scatter-gather list to prdt
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*
* Return: 0 in case of success, non-zero value in case of failure.
*/
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int sg_segments = scsi_dma_map(cmd);
if (sg_segments < 0)
@@ -2720,7 +2729,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
- return ufshcd_crypto_fill_prdt(hba, lrbp);
+ return ufshcd_crypto_fill_prdt(hba, cmd);
}
/**
@@ -2779,13 +2788,14 @@ ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
/**
* ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
* for scsi commands
- * @lrbp: local reference block pointer
+ * @cmd: SCSI command
* @upiu_flags: flags
*/
-static
-void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
+static void ufshcd_prepare_utp_scsi_cmd_upiu(struct scsi_cmnd *cmd,
+ u8 upiu_flags)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
@@ -2793,11 +2803,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
.transaction_code = UPIU_TRANSACTION_COMMAND,
.flags = upiu_flags,
.lun = lrbp->lun,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
};
- WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
+ WARN_ON_ONCE(ucd_req_ptr->header.task_tag != tag);
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
@@ -2810,13 +2820,15 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
/**
* ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
- * @lrbp: local reference block pointer
+ * @cmd: SCSI command pointer
* @upiu_flags: flags
*/
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, u8 upiu_flags)
+ struct scsi_cmnd *cmd, u8 upiu_flags)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
@@ -2825,7 +2837,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
.flags = upiu_flags,
.lun = lrbp->lun,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
.query_function = query->request.query_func,
/* Data segment length only need for WRITE_DESC */
.data_segment_length =
@@ -2844,15 +2856,17 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
memcpy(ucd_req_ptr + 1, query->descriptor, len);
}
-static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+static inline void ufshcd_prepare_utp_nop_upiu(struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
ucd_req_ptr->header = (struct utp_upiu_header){
.transaction_code = UPIU_TRANSACTION_NOP_OUT,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
};
}
@@ -2860,22 +2874,23 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
* ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
* for Device Management Purposes
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command pointer
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
u8 upiu_flags;
int ret = 0;
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
- ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+ ufshcd_prepare_utp_query_req_upiu(hba, cmd, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
- ufshcd_prepare_utp_nop_upiu(lrbp);
+ ufshcd_prepare_utp_nop_upiu(cmd);
else
ret = -EINVAL;
@@ -2888,38 +2903,69 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
* ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
* for SCSI Purposes
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*/
-static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
- ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
+ cmd->sc_data_direction, 0);
if (ioprio_class == IOPRIO_CLASS_RT)
upiu_flags |= UPIU_CMD_FLAGS_CP;
- ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ ufshcd_prepare_utp_scsi_cmd_upiu(cmd, upiu_flags);
}
-static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag)
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
+ const int i = scsi_cmd_to_rq(cmd)->tag;
+ struct utp_transfer_cmd_desc *cmd_descp =
+ (void *)hba->ucdl_base_addr + i * ufshcd_get_ucd_size(hba);
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
+ dma_addr_t cmd_desc_element_addr =
+ hba->ucdl_dma_addr + i * ufshcd_get_ucd_size(hba);
+ u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
+ u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
+ struct ufshcd_lrb *lrb = scsi_cmd_priv(cmd);
+
+ lrb->utr_descriptor_ptr = utrdlp + i;
+ lrb->utrd_dma_addr =
+ hba->utrdl_dma_addr + i * sizeof(struct utp_transfer_req_desc);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
+ lrb->ucd_req_dma_addr = cmd_desc_element_addr;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
+ lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
+ lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+}
+
+static void __ufshcd_setup_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ u8 lun, int tag)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ ufshcd_init_lrb(hba, cmd);
+
memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr));
- lrbp->cmd = cmd;
- lrbp->task_tag = tag;
lrbp->lun = lun;
- ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp);
+ ufshcd_prepare_lrbp_crypto(ufshcd_is_scsi_cmd(cmd) ?
+ scsi_cmd_to_rq(cmd) : NULL, lrbp);
}
-static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- struct scsi_cmnd *cmd, u8 lun, int tag)
+static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ u8 lun, int tag)
{
- __ufshcd_setup_cmd(lrbp, cmd, lun, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ __ufshcd_setup_cmd(hba, cmd, lun, tag);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
lrbp->req_abort_skip = false;
- ufshcd_comp_scsi_upiu(hba, lrbp);
+ ufshcd_comp_scsi_upiu(hba, cmd);
}
/**
@@ -2970,25 +3016,13 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
}
}
-static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
+/*
+ * The only purpose of this function is to make the SCSI core skip the memset()
+ * call for the private command data.
+ */
+static int ufshcd_init_cmd_priv(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
- i * ufshcd_get_ucd_size(hba);
- struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
- dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * ufshcd_get_ucd_size(hba);
- u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
- u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
-
- lrb->utr_descriptor_ptr = utrdlp + i;
- lrb->utrd_dma_addr = hba->utrdl_dma_addr +
- i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
- lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
- lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
- lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+ return 0;
}
/**
@@ -3002,7 +3036,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp;
int err = 0;
struct ufs_hw_queue *hwq = NULL;
@@ -3053,11 +3086,10 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_hold(hba);
- lrbp = &hba->lrb[tag];
+ ufshcd_setup_scsi_cmd(hba, cmd,
+ ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
- ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
-
- err = ufshcd_map_sg(hba, lrbp);
+ err = ufshcd_map_sg(hba, cmd);
if (err) {
ufshcd_release(hba);
goto out;
@@ -3066,7 +3098,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (hba->mcq_enabled)
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- ufshcd_send_command(hba, tag, hwq);
+ ufshcd_send_command(hba, cmd, hwq);
out:
if (ufs_trigger_eh(hba)) {
@@ -3080,10 +3112,26 @@ out:
return err;
}
-static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- enum dev_cmd_type cmd_type, u8 lun, int tag)
+static int ufshcd_queue_reserved_command(struct Scsi_Host *host,
+ struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct ufs_hba *hba = shost_priv(host);
+ struct ufs_hw_queue *hwq =
+ hba->mcq_enabled ? ufshcd_mcq_req_to_hwq(hba, rq) : NULL;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+ ufshcd_send_command(hba, cmd, hwq);
+ return 0;
+}
+
+static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ enum dev_cmd_type cmd_type, u8 lun, int tag)
{
- __ufshcd_setup_cmd(lrbp, NULL, lun, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ __ufshcd_setup_cmd(hba, cmd, lun, tag);
lrbp->intr_cmd = true; /* No interrupt aggregation */
hba->dev_cmd.type = cmd_type;
}
@@ -3091,12 +3139,12 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
/*
* Return: 0 upon success; < 0 upon failure.
*/
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ enum dev_cmd_type cmd_type, int tag)
{
- ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
+ ufshcd_setup_dev_cmd(hba, cmd, cmd_type, 0, tag);
- return ufshcd_compose_devman_upiu(hba, lrbp);
+ return ufshcd_compose_devman_upiu(hba, cmd);
}
/*
@@ -3207,87 +3255,6 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return err;
}
-/*
- * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
- * < 0 if another error occurred.
- */
-static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, int max_timeout)
-{
- unsigned long time_left = msecs_to_jiffies(max_timeout);
- unsigned long flags;
- bool pending;
- int err;
-
-retry:
- time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
- time_left);
-
- if (likely(time_left)) {
- err = ufshcd_get_tr_ocs(lrbp, NULL);
- if (!err)
- err = ufshcd_dev_cmd_completion(hba, lrbp);
- } else {
- err = -ETIMEDOUT;
- dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
- __func__, lrbp->task_tag);
-
- /* MCQ mode */
- if (hba->mcq_enabled) {
- /* successfully cleared the command, retry if needed */
- if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
- err = -EAGAIN;
- return err;
- }
-
- /* SDB mode */
- if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
- /* successfully cleared the command, retry if needed */
- err = -EAGAIN;
- /*
- * Since clearing the command succeeded we also need to
- * clear the task tag bit from the outstanding_reqs
- * variable.
- */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- pending = test_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- if (pending)
- __clear_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- if (!pending) {
- /*
- * The completion handler ran while we tried to
- * clear the command.
- */
- time_left = 1;
- goto retry;
- }
- } else {
- dev_err(hba->dev, "%s: failed to clear tag %d\n",
- __func__, lrbp->task_tag);
-
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- pending = test_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- if (!pending) {
- /*
- * The completion handler ran while we tried to
- * clear the command.
- */
- time_left = 1;
- goto retry;
- }
- }
- }
-
- return err;
-}
-
static void ufshcd_dev_man_lock(struct ufs_hba *hba)
{
ufshcd_hold(hba);
@@ -3302,23 +3269,40 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
ufshcd_release(hba);
}
+static struct scsi_cmnd *ufshcd_get_dev_mgmt_cmd(struct ufs_hba *hba)
+{
+ /*
+ * The caller must hold this lock to guarantee that the NOWAIT
+ * allocation will succeed.
+ */
+ lockdep_assert_held(&hba->dev_cmd.lock);
+
+ return scsi_get_internal_cmd(
+ hba->host->pseudo_sdev, DMA_TO_DEVICE,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+}
+
+static void ufshcd_put_dev_mgmt_cmd(struct scsi_cmnd *cmd)
+{
+ scsi_put_internal_cmd(cmd);
+}
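A minimal sketch of the new device management command lifecycle, modeled on ufshcd_exec_dev_cmd() below (illustrative only; the helper name example_send_nop and the 1000 ms timeout are invented):

	static int example_send_nop(struct ufs_hba *hba)
	{
		struct scsi_cmnd *cmd;
		int tag, err = -ENOMEM;

		ufshcd_dev_man_lock(hba);		/* acquires hba->dev_cmd.lock */
		cmd = ufshcd_get_dev_mgmt_cmd(hba);	/* reserved tag, NOWAIT */
		if (cmd) {
			tag = scsi_cmd_to_rq(cmd)->tag;
			err = ufshcd_compose_dev_cmd(hba, cmd, DEV_CMD_TYPE_NOP, tag);
			if (!err)
				err = ufshcd_issue_dev_cmd(hba, cmd, tag, 1000);
			ufshcd_put_dev_mgmt_cmd(cmd);
		}
		ufshcd_dev_man_unlock(hba);
		return err;
	}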
+
/*
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
-static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- const u32 tag, int timeout)
+static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ const u32 tag, int timeout)
{
- int err;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
-
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ blk_status_t sts;
- return err;
+ rq->timeout = msecs_to_jiffies(timeout); /* callers pass milliseconds; rq->timeout is in jiffies */
+ sts = blk_execute_rq(rq, true);
+ if (sts != BLK_STS_OK)
+ return blk_status_to_errno(sts);
+ return lrbp->utr_descriptor_ptr->header.ocs;
}
/**
@@ -3336,18 +3320,31 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u32 tag;
int err;
- /* Protects use of hba->reserved_slot. */
+ /* Protects use of hba->dev_cmd. */
lockdep_assert_held(&hba->dev_cmd.lock);
- err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (WARN_ON_ONCE(!cmd))
+ return -ENOMEM;
+
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ err = ufshcd_compose_dev_cmd(hba, cmd, cmd_type, tag);
if (unlikely(err))
- return err;
+ goto out;
+
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, timeout);
+ if (err == 0)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+
+out:
+ ufshcd_put_dev_mgmt_cmd(cmd);
- return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
+ return err;
}
/**
@@ -3773,16 +3770,14 @@ static inline char ufshcd_remove_non_printable(u8 ch)
* @desc_index: descriptor index
* @buf: pointer to buffer where descriptor would be read,
* the caller should free the memory.
- * @ascii: if true convert from unicode to ascii characters
- * null terminated string.
+ * @fmt: if %SD_ASCII_STD, convert from UTF-16 to ASCII
*
* Return:
* * string size on success.
* * -ENOMEM: on allocation failure
* * -EINVAL: on a wrong parameter
*/
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii)
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, enum ufs_descr_fmt fmt)
{
struct uc_string_id *uc_str;
u8 *str;
@@ -3811,7 +3806,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
goto out;
}
- if (ascii) {
+ if (fmt == SD_ASCII_STD) {
ssize_t ascii_len;
int i;
/* remove header and divide by 2 to move from UTF16 to UTF8 */
@@ -3837,7 +3832,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
str[ret++] = '\0';
} else {
- str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
+ str = kmemdup(uc_str->uc, uc_str->len, GFP_KERNEL);
if (!str) {
ret = -ENOMEM;
goto out;
@@ -3977,14 +3972,6 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
}
skip_utmrdl:
- /* Allocate memory for local reference block */
- hba->lrb = devm_kcalloc(hba->dev,
- hba->nutrs, sizeof(struct ufshcd_lrb),
- GFP_KERNEL);
- if (!hba->lrb) {
- dev_err(hba->dev, "LRB Memory allocation failed\n");
- goto out;
- }
return 0;
out:
return -ENOMEM;
@@ -4046,8 +4033,6 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
}
-
- ufshcd_init_lrb(hba, &hba->lrb[i], i);
}
}
@@ -5253,10 +5238,15 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
- /* In case of RPMB LU, check if advanced RPMB mode is enabled */
- if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
- desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
- hba->dev_info.b_advanced_rpmb_en = true;
+ /* In case of RPMB LU, check if advanced RPMB mode is enabled, and get region size */
+ if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN) {
+ if (desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
+ hba->dev_info.b_advanced_rpmb_en = true;
+ hba->dev_info.rpmb_region_size[0] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION0_SIZE];
+ hba->dev_info.rpmb_region_size[1] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION1_SIZE];
+ hba->dev_info.rpmb_region_size[2] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION2_SIZE];
+ hba->dev_info.rpmb_region_size[3] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION3_SIZE];
+ }
kfree(desc_buf);
@@ -5396,19 +5386,18 @@ static void ufshcd_sdev_destroy(struct scsi_device *sdev)
/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
- * @lrbp: pointer to local reference block of completed command
+ * @cmd: SCSI command
* @scsi_status: SCSI command status
*
* Return: value base on SCSI command status.
*/
-static inline int
-ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+static inline int ufshcd_scsi_cmd_status(struct scsi_cmnd *cmd, int scsi_status)
{
int result = 0;
switch (scsi_status) {
case SAM_STAT_CHECK_CONDITION:
- ufshcd_copy_sense_data(lrbp);
+ ufshcd_copy_sense_data(cmd);
fallthrough;
case SAM_STAT_GOOD:
result |= DID_OK << 16 | scsi_status;
@@ -5416,7 +5405,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_BUSY:
case SAM_STAT_TASK_ABORTED:
- ufshcd_copy_sense_data(lrbp);
+ ufshcd_copy_sense_data(cmd);
result |= scsi_status;
break;
default:
@@ -5430,15 +5419,17 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
/**
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
- * @lrbp: pointer to local reference block of completed command
+ * @cmd: SCSI command
* @cqe: pointer to the completion queue entry
*
* Return: result of the command to notify SCSI midlayer.
*/
-static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- struct cq_entry *cqe)
+static inline int ufshcd_transfer_rsp_status(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd,
+ struct cq_entry *cqe)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
int result = 0;
int scsi_status;
enum utp_ocs ocs;
@@ -5452,7 +5443,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* not set either flag.
*/
if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
- scsi_set_resid(lrbp->cmd, resid);
+ scsi_set_resid(cmd, resid);
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp, cqe);
@@ -5473,7 +5464,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* to notify the SCSI midlayer of the command status
*/
scsi_status = lrbp->ucd_rsp_ptr->header.status;
- result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+ result = ufshcd_scsi_cmd_status(cmd, scsi_status);
/*
* Currently we are only supporting BKOPs exception
@@ -5510,10 +5501,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
case OCS_ABORTED:
case OCS_INVALID_COMMAND_STATUS:
result |= DID_REQUEUE << 16;
- dev_warn(hba->dev,
- "OCS %s from controller for tag %d\n",
- (ocs == OCS_ABORTED ? "aborted" : "invalid"),
- lrbp->task_tag);
+ dev_warn(hba->dev, "OCS %s from controller for tag %d\n",
+ ocs == OCS_ABORTED ? "aborted" : "invalid", tag);
break;
case OCS_INVALID_CMD_TABLE_ATTR:
case OCS_INVALID_PRDT_ATTR:
@@ -5526,17 +5515,19 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
case OCS_GENERAL_CRYPTO_ERROR:
default:
result |= DID_ERROR << 16;
- dev_err(hba->dev,
- "OCS error from controller = %x for tag %d\n",
- ocs, lrbp->task_tag);
+ dev_err(hba->dev, "OCS error from controller = %x for tag %d\n",
+ ocs, tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
if ((host_byte(result) != DID_OK) &&
- (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
- ufshcd_print_tr(hba, lrbp->task_tag, true);
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) {
+ if (cqe)
+ ufshcd_hex_dump("UPIU CQE: ", cqe, sizeof(struct cq_entry));
+ ufshcd_print_tr(hba, cmd, true);
+ }
return result;
}
@@ -5575,7 +5566,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
guard(spinlock_irqsave)(hba->host->host_lock);
cmd = hba->active_uic_cmd;
if (!cmd)
- goto unlock;
+ return retval;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
@@ -5584,13 +5575,13 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
cmd->argument3 = ufshcd_get_dme_attr_val(hba);
if (!hba->uic_async_done)
- cmd->cmd_active = 0;
+ cmd->cmd_active = false;
complete(&cmd->done);
retval = IRQ_HANDLED;
}
if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
- cmd->cmd_active = 0;
+ cmd->cmd_active = false;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
@@ -5598,18 +5589,14 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
-unlock:
return retval;
}
/* Release the resources allocated for processing a SCSI command. */
-void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
-
scsi_dma_unmap(cmd);
- ufshcd_crypto_clear_prdt(hba, lrbp);
+ ufshcd_crypto_clear_prdt(hba, cmd);
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
@@ -5623,31 +5610,39 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe)
{
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
enum utp_ocs ocs;
- lrbp = &hba->lrb[task_tag];
+ if (WARN_ONCE(!cmd, "cqe->command_desc_base_addr = %#llx\n",
+ le64_to_cpu(cqe->command_desc_base_addr)))
+ return;
+
if (hba->monitor.enabled) {
lrbp->compl_time_stamp = ktime_get();
lrbp->compl_time_stamp_local_clock = local_clock();
}
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
- cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
- ufshcd_release_scsi_cmd(hba, lrbp);
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
+ if (ufshcd_is_scsi_cmd(cmd)) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, cmd)))
+ ufshcd_update_monitor(hba, cmd);
+ ufshcd_add_command_trace(hba, cmd, UFS_CMD_COMP);
+ cmd->result = ufshcd_transfer_rsp_status(hba, cmd, cqe);
+ ufshcd_release_scsi_cmd(hba, cmd);
} else {
if (cqe) {
- ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+ ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
+ } else {
+ ocs = lrbp->utr_descriptor_ptr->header.ocs;
}
- complete(&hba->dev_cmd.complete);
+ ufshcd_add_query_upiu_trace(
+ hba,
+ ocs == OCS_SUCCESS ? UFS_QUERY_COMP : UFS_QUERY_ERR,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ cmd->result = 0;
}
+ /* Do not touch lrbp after scsi_done() has been called. */
+ scsi_done(cmd);
}
/**
@@ -5675,7 +5670,7 @@ static void ufshcd_clear_polled(struct ufs_hba *hba,
int tag;
for_each_set_bit(tag, completed_reqs, hba->nutrs) {
- struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
+ struct scsi_cmnd *cmd = scsi_host_find_tag(hba->host, tag);
if (!cmd)
continue;
@@ -5720,6 +5715,47 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
return completed_reqs != 0;
}
+static bool ufshcd_mcq_force_compl_one(struct request *rq, void *priv)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_device *sdev = rq->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+
+ if (blk_mq_is_reserved_rq(rq) || !hwq)
+ return true;
+
+ ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
+
+ /*
+ * For those cmds of which the cqes are not present in the cq, complete
+ * them explicitly.
+ */
+ scoped_guard(spinlock_irqsave, &hwq->cq_lock) {
+ if (!test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
+ set_host_byte(cmd, DID_REQUEUE);
+ ufshcd_release_scsi_cmd(hba, cmd);
+ scsi_done(cmd);
+ }
+ }
+
+ return true;
+}
+
+static bool ufshcd_mcq_compl_one(struct request *rq, void *priv)
+{
+ struct scsi_device *sdev = rq->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+
+ if (!blk_mq_is_reserved_rq(rq) && hwq)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+
+ return true;
+}
+
/**
* ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
* invoked from the error handler context or ufshcd_host_reset_and_restore()
@@ -5734,40 +5770,10 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
bool force_compl)
{
- struct ufs_hw_queue *hwq;
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
- unsigned long flags;
- int tag;
-
- for (tag = 0; tag < hba->nutrs; tag++) {
- lrbp = &hba->lrb[tag];
- cmd = lrbp->cmd;
- if (!ufshcd_cmd_inflight(cmd) ||
- test_bit(SCMD_STATE_COMPLETE, &cmd->state))
- continue;
-
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- if (!hwq)
- continue;
-
- if (force_compl) {
- ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
- /*
- * For those cmds of which the cqes are not present
- * in the cq, complete them explicitly.
- */
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
- set_host_byte(cmd, DID_REQUEUE);
- ufshcd_release_scsi_cmd(hba, lrbp);
- scsi_done(cmd);
- }
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
- } else {
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
- }
- }
+ blk_mq_tagset_busy_iter(&hba->host->tag_set,
+ force_compl ? ufshcd_mcq_force_compl_one :
+ ufshcd_mcq_compl_one,
+ NULL);
}
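blk_mq_tagset_busy_iter() invokes the callback once for every started request in the tag set; returning true continues the walk, returning false stops it early. A sketch of the callback contract (hypothetical helper, not part of the patch):

	static bool example_count_inflight(struct request *rq, void *priv)
	{
		unsigned int *count = priv;

		/* Count only regular SCSI commands, like the callbacks above. */
		if (!blk_mq_is_reserved_rq(rq))
			(*count)++;
		return true;	/* keep iterating */
	}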
/**
@@ -6612,9 +6618,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
+ if (blk_mq_is_reserved_rq(rq))
+ return true;
+
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ufshcd_is_scsi_cmd(cmd) ? cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
return *ret == 0;
@@ -7349,15 +7358,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u32 tag;
int err = 0;
u8 upiu_flags;
- /* Protects use of hba->reserved_slot. */
+ /* Protects use of hba->dev_cmd. */
lockdep_assert_held(&hba->dev_cmd.lock);
- ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
+ if (WARN_ON_ONCE(!cmd))
+ return -ENOMEM;
+
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ ufshcd_setup_dev_cmd(hba, cmd, cmd_type, 0, tag);
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
@@ -7377,12 +7392,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- /*
- * ignore the returning value here - ufshcd_check_query_response is
- * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
- * read the response directly ignoring all errors.
- */
- ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, dev_cmd_timeout);
+ if (err)
+ goto put_dev_mgmt_cmd;
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7403,6 +7415,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
+put_dev_mgmt_cmd:
+ ufshcd_put_dev_mgmt_cmd(cmd);
+
return err;
}
@@ -7496,8 +7511,9 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
enum dma_data_direction dir)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd;
+ struct ufshcd_lrb *lrbp;
+ u32 tag;
int err = 0;
int result;
u8 upiu_flags;
@@ -7505,10 +7521,20 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
u16 ehs_len;
int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
- /* Protects use of hba->reserved_slot. */
ufshcd_dev_man_lock(hba);
- ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
+ cmd = ufshcd_get_dev_mgmt_cmd(hba);
+
+ if (WARN_ON_ONCE(!cmd)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ lrbp = scsi_cmd_priv(cmd);
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ ufshcd_setup_dev_cmd(hba, cmd, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN,
+ tag);
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
@@ -7525,8 +7551,11 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, ADVANCED_RPMB_REQ_TIMEOUT);
+ if (err)
+ goto put_dev_mgmt_cmd;
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
if (!err) {
/* Just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7550,11 +7579,45 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
}
}
+put_dev_mgmt_cmd:
+ ufshcd_put_dev_mgmt_cmd(cmd);
+
+unlock:
ufshcd_dev_man_unlock(hba);
return err ? : result;
}
+static bool ufshcd_clear_lu_cmds(struct request *req, void *priv)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ const u64 lun = *(u64 *)priv;
+ const u32 tag = req->tag;
+
+ if (blk_mq_is_reserved_rq(req) || sdev->lun != lun)
+ return true;
+
+ if (ufshcd_clear_cmd(hba, tag) < 0) {
+ dev_err(hba->dev, "%s: failed to clear request %d\n", __func__,
+ tag);
+ return true;
+ }
+
+ if (hba->mcq_enabled) {
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, req);
+
+ if (hwq)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ return true;
+ }
+
+ ufshcd_compl_one_cqe(hba, tag, NULL);
+ return true;
+}
+
/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
@@ -7563,12 +7626,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
- unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
- struct ufs_hw_queue *hwq;
- struct ufshcd_lrb *lrbp;
- u32 pos, not_cleared_mask = 0;
int err;
u8 resp = 0xF, lun;
@@ -7577,50 +7636,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err)
- err = resp;
- goto out;
- }
-
- if (hba->mcq_enabled) {
- for (pos = 0; pos < hba->nutrs; pos++) {
- lrbp = &hba->lrb[pos];
- if (ufshcd_cmd_inflight(lrbp->cmd) &&
- lrbp->lun == lun) {
- ufshcd_clear_cmd(hba, pos);
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
- }
- }
- err = 0;
- goto out;
- }
-
- /* clear the commands that were pending for corresponding LUN */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
- if (hba->lrb[pos].lun == lun)
- __set_bit(pos, &pending_reqs);
- hba->outstanding_reqs &= ~pending_reqs;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
- if (ufshcd_clear_cmd(hba, pos) < 0) {
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- not_cleared = 1U << pos &
- ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- hba->outstanding_reqs |= not_cleared;
- not_cleared_mask |= not_cleared;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- dev_err(hba->dev, "%s: failed to clear request %d\n",
- __func__, pos);
- }
+ if (err) {
+ } else if (resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ err = resp;
+ } else {
+ /* clear the commands that were pending for the corresponding LUN */
+ blk_mq_tagset_busy_iter(&hba->host->tag_set,
+ ufshcd_clear_lu_cmds,
+ &cmd->device->lun);
}
- __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
-out:
hba->req_abort_count = 0;
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
if (!err) {
@@ -7634,11 +7659,12 @@ out:
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
- struct ufshcd_lrb *lrbp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutrs) {
- lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
lrbp->req_abort_skip = true;
}
}
@@ -7646,7 +7672,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
/**
* ufshcd_try_to_abort_task - abort a specific task
* @hba: Pointer to adapter instance
- * @tag: Task tag/index to be aborted
+ * @tag: Tag of the task to be aborted
*
* Abort the pending command in device by sending UFS_ABORT_TASK task management
* command, and in host controller by clearing the door-bell register. There can
@@ -7658,14 +7684,15 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
*/
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int err;
int poll_cnt;
u8 resp = 0xF;
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_QUERY_TASK, &resp);
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, tag, UFS_QUERY_TASK,
+ &resp);
if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
/* cmd pending in the device */
dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
@@ -7680,7 +7707,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
hba->dev,
"%s: cmd with tag %d not pending in the device.\n",
__func__, tag);
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+ if (!ufshcd_cmd_inflight(cmd)) {
dev_info(hba->dev,
"%s: cmd with tag=%d completed.\n",
__func__, tag);
@@ -7698,8 +7725,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
if (!poll_cnt)
return -EBUSY;
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_ABORT_TASK, &resp);
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, tag, UFS_ABORT_TASK, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err) {
err = resp; /* service response error */
@@ -7727,8 +7753,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
- int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ int tag = rq->tag;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
unsigned long flags;
int err = FAILED;
bool outstanding;
@@ -7757,15 +7784,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* to reduce repeated printouts. For other aborted requests only print
* basic details.
*/
- scsi_print_command(cmd);
+ if (ufshcd_is_scsi_cmd(cmd))
+ scsi_print_command(cmd);
if (!hba->req_abort_count) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_tr(hba, tag, true);
+ ufshcd_print_tr(hba, cmd, true);
} else {
- ufshcd_print_tr(hba, tag, false);
+ ufshcd_print_tr(hba, cmd, false);
}
hba->req_abort_count++;
@@ -7809,7 +7837,10 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- err = ufshcd_try_to_abort_task(hba, tag);
+ if (blk_mq_is_reserved_rq(rq))
+ err = ufshcd_clear_cmd(hba, tag);
+ else
+ err = ufshcd_try_to_abort_task(hba, tag);
if (err) {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
@@ -7826,7 +7857,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (outstanding)
- ufshcd_release_scsi_cmd(hba, lrbp);
+ ufshcd_release_scsi_cmd(hba, cmd);
err = SUCCESS;
@@ -8192,8 +8223,11 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
if (IS_ERR(sdev_rpmb)) {
ret = PTR_ERR(sdev_rpmb);
+ hba->ufs_rpmb_wlun = NULL;
+ dev_err(hba->dev, "%s: RPMB WLUN not found\n", __func__);
goto remove_ufs_device_wlun;
}
+ hba->ufs_rpmb_wlun = sdev_rpmb;
ufshcd_blk_pm_runtime_init(sdev_rpmb);
scsi_device_put(sdev_rpmb);
@@ -8461,12 +8495,74 @@ static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
dev_info->rtc_update_period = 0;
}
+/**
+ * ufshcd_create_device_id - Generate unique device identifier string
+ * @hba: per-adapter instance
+ * @desc_buf: device descriptor buffer
+ *
+ * Creates a unique device ID string combining manufacturer ID, spec version,
+ * model name, serial number (as hex), device version, and manufacture date.
+ *
+ * Return: Allocated device ID string on success, NULL on failure.
+ */
+static char *ufshcd_create_device_id(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 manufacture_date;
+ u16 device_version;
+ u8 *serial_number;
+ char *serial_hex;
+ char *device_id;
+ u8 serial_index;
+ int serial_len;
+ int ret;
+
+ serial_index = desc_buf[DEVICE_DESC_PARAM_SN];
+
+ ret = ufshcd_read_string_desc(hba, serial_index, &serial_number, SD_RAW);
+ if (ret < 0) {
+ dev_err(hba->dev, "Failed reading Serial Number. err = %d\n", ret);
+ return NULL;
+ }
+
+ device_version = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_DEV_VER]);
+ manufacture_date = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_MANF_DATE]);
+
+ serial_len = ret;
+ /* Allocate buffer for hex string: 2 chars per byte + null terminator */
+ serial_hex = kzalloc(serial_len * 2 + 1, GFP_KERNEL);
+ if (!serial_hex) {
+ kfree(serial_number);
+ return NULL;
+ }
+
+ bin2hex(serial_hex, serial_number, serial_len);
+
+ /*
+ * Device ID format is ABI with secure world - do not change without firmware
+ * coordination.
+ */
+ device_id = kasprintf(GFP_KERNEL, "%04X-%04X-%s-%s-%04X-%04X",
+ dev_info->wmanufacturerid, dev_info->wspecversion,
+ dev_info->model, serial_hex, device_version,
+ manufacture_date);
+
+ kfree(serial_hex);
+ kfree(serial_number);
+
+ if (!device_id)
+ dev_warn(hba->dev, "Failed to allocate unique device ID\n");
+
+ return device_id;
+}
+
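As a worked example of the format string above (all values invented): wmanufacturerid 0x012B, wspecversion 0x0310, model "THGJFGT0", the 8-byte serial number 00 11 22 33 44 55 66 77, device version 0x0001 and manufacture date 0x2207 would produce:

	012B-0310-THGJFGT0-0011223344556677-0001-2207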
static int ufs_get_device_desc(struct ufs_hba *hba)
{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ struct Scsi_Host *shost = hba->host;
int err;
u8 model_index;
u8 *desc_buf;
- struct ufs_dev_info *dev_info = &hba->dev_info;
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
@@ -8494,6 +8590,18 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
+ /*
+ * According to the UFS standard, the UFS device queue depth
+ * (bQueueDepth) must be in the range 1..255 if the shared queueing
+ * architecture is supported. bQueueDepth is zero if the shared queueing
+ * architecture is not supported.
+ */
+ if (dev_info->bqueuedepth)
+ shost->cmd_per_lun = min(hba->nutrs, dev_info->bqueuedepth) -
+ UFSHCD_NUM_RESERVED;
+ else
+ shost->cmd_per_lun = shost->can_queue;
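+ /*
+ * Worked example (UFSHCD_NUM_RESERVED assumed to be 1): with
+ * hba->nutrs = 32 and bQueueDepth = 64, cmd_per_lun becomes
+ * min(32, 64) - 1 = 31. With bQueueDepth == 0, cmd_per_lun falls
+ * back to shost->can_queue.
+ */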
+
dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
dev_info->hid_sup = get_unaligned_be32(desc_buf +
@@ -8510,6 +8618,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ /* Generate unique device ID */
+ dev_info->device_id = ufshcd_create_device_id(hba, desc_buf);
+
hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
@@ -8545,6 +8656,8 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
kfree(dev_info->model);
dev_info->model = NULL;
+ kfree(dev_info->device_id);
+ dev_info->device_id = NULL;
}
/**
@@ -8688,6 +8801,8 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
+ hba->dev_info.rpmb_io_size = desc_buf[GEOMETRY_DESC_PARAM_RPMB_RW_SIZE];
+
out:
kfree(desc_buf);
return err;
@@ -8874,6 +8989,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
+ ufs_rpmb_probe(hba);
out:
return ret;
@@ -8891,8 +9007,6 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
hba->utrdl_dma_addr);
-
- devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
@@ -8900,7 +9014,7 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
int ret;
int old_nutrs = hba->nutrs;
- ret = ufshcd_mcq_decide_queue_depth(hba);
+ ret = ufshcd_get_hba_mac(hba);
if (ret < 0)
return ret;
@@ -8926,7 +9040,6 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
goto err;
hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
- hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
return 0;
err:
@@ -9164,7 +9277,11 @@ static const struct scsi_host_template ufshcd_driver_template = {
.name = UFSHCD,
.proc_name = UFSHCD,
.map_queues = ufshcd_map_queues,
+ .cmd_size = sizeof(struct ufshcd_lrb),
+ .init_cmd_priv = ufshcd_init_cmd_priv,
.queuecommand = ufshcd_queuecommand,
+ .queue_reserved_command = ufshcd_queue_reserved_command,
+ .nr_reserved_cmds = UFSHCD_NUM_RESERVED,
.mq_poll = ufshcd_poll,
.sdev_init = ufshcd_sdev_init,
.sdev_configure = ufshcd_sdev_configure,
@@ -9775,11 +9892,11 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
}
/*
- * Some UFS devices require delay after VCC power rail is turned-off.
+ * All UFS devices require a delay after the VCC power rail is turned off.
*/
- if (vcc_off && hba->vreg_info.vcc &&
- hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
- usleep_range(5000, 5100);
+ if (vcc_off && hba->vreg_info.vcc && !hba->vreg_info.vcc->always_on)
+ usleep_range(hba->vcc_off_delay_us,
+ hba->vcc_off_delay_us + 100);
}
#ifdef CONFIG_PM
@@ -10428,6 +10545,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_rpm_get_sync(hba);
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
+ ufs_rpmb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
blk_mq_destroy_queue(hba->tmf_queue);
@@ -10587,6 +10705,9 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
{
int err;
+ WARN_ON_ONCE(!hba->host->can_queue);
+ WARN_ON_ONCE(!hba->host->cmd_per_lun);
+
if (is_mcq_supported(hba)) {
ufshcd_mcq_enable(hba);
err = ufshcd_alloc_mcq(hba);
@@ -10706,7 +10827,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
- init_completion(&hba->dev_cmd.complete);
+ /*
+ * Most UFS devices require a 1 ms delay after VCC is powered off
+ * before it can be powered on again. Set the default to 2 ms;
+ * platform drivers can override this setting as needed.
+ */
+ hba->vcc_off_delay_us = 2000;
err = ufshcd_hba_init(hba);
if (err)
@@ -10740,7 +10866,11 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_host_memory_configure(hba);
host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
- host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
+ /*
+ * Set the queue depth for WLUNs. ufs_get_device_desc() will increase
+ * host->cmd_per_lun to a larger value.
+ */
+ host->cmd_per_lun = 1;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
@@ -10832,6 +10962,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}
+ err = ufshcd_add_scsi_host(hba);
+ if (err)
+ goto out_disable;
+
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
@@ -10882,10 +11016,6 @@ initialized:
if (err)
goto out_disable;
- err = ufshcd_add_scsi_host(hba);
- if (err)
- goto out_disable;
-
ufs_sysfs_add_nodes(hba->dev);
async_schedule(ufshcd_async_scan, hba);
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 191fbd799ec5..7d5117b2dab4 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -154,3 +154,16 @@ config SCSI_UFS_ROCKCHIP
Select this if you have UFS controller on Rockchip chipset.
If unsure, say N.
+
+config SCSI_UFS_AMD_VERSAL2
+ tristate "AMD Versal Gen 2 UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_ZYNQMP || COMPILE_TEST)
+ help
+ This selects the AMD Versal Gen 2 specific additions on top of
+ the UFSHCD DWC and UFSHCD platform drivers. The UFS host on AMD
+ Versal Gen 2 needs vendor-specific configuration, such as PHY
+ setup and vendor-specific register accesses, before the hardware
+ can be accessed.
+
+ Select this if you have a UFS controller on an AMD Versal Gen 2 SoC.
+ If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 2f97feb5db3f..65d8bb23ab7b 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
+obj-$(CONFIG_SCSI_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o
diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 21214e5d5896..43781593b5c1 100644
--- a/drivers/ufs/host/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
@@ -15,18 +15,26 @@
#define TI_UFS_SS_RST_N_PCS BIT(0)
#define TI_UFS_SS_CLK_26MHZ BIT(4)
+struct ti_j721e_ufs {
+ void __iomem *regbase;
+ u32 reg;
+};
+
static int ti_j721e_ufs_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct ti_j721e_ufs *ufs;
unsigned long clk_rate;
- void __iomem *regbase;
struct clk *clk;
- u32 reg = 0;
int ret;
- regbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regbase))
- return PTR_ERR(regbase);
+ ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+ if (!ufs)
+ return -ENOMEM;
+
+ ufs->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ufs->regbase))
+ return PTR_ERR(ufs->regbase);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
@@ -42,12 +50,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
}
clk_rate = clk_get_rate(clk);
if (clk_rate == 26000000)
- reg |= TI_UFS_SS_CLK_26MHZ;
+ ufs->reg |= TI_UFS_SS_CLK_26MHZ;
devm_clk_put(dev, clk);
/* Take UFS slave device out of reset */
- reg |= TI_UFS_SS_RST_N_PCS;
- writel(reg, regbase + TI_UFS_SS_CTRL);
+ ufs->reg |= TI_UFS_SS_RST_N_PCS;
+ writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+
+ dev_set_drvdata(dev, ufs);
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
dev);
@@ -72,6 +82,16 @@ static void ti_j721e_ufs_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
+static int ti_j721e_ufs_resume(struct device *dev)
+{
+ struct ti_j721e_ufs *ufs = dev_get_drvdata(dev);
+
+ writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_j721e_ufs_pm_ops, NULL, ti_j721e_ufs_resume);
+
static const struct of_device_id ti_j721e_ufs_of_match[] = {
{
.compatible = "ti,j721e-ufs",
@@ -87,6 +107,7 @@ static struct platform_driver ti_j721e_ufs_driver = {
.driver = {
.name = "ti-j721e-ufs",
.of_match_table = ti_j721e_ufs_of_match,
+ .pm = pm_sleep_ptr(&ti_j721e_ufs_pm_ops),
},
};
module_platform_driver(ti_j721e_ufs_driver);
diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c
new file mode 100644
index 000000000000..40543db621a1
--- /dev/null
+++ b/drivers/ufs/host/ufs-amd-versal2.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Authors: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <ufs/unipro.h>
+
+#include "ufshcd-dwc.h"
+#include "ufshcd-pltfrm.h"
+#include "ufshci-dwc.h"
+
+/* PHY modes */
+#define UFSHCD_DWC_PHY_MODE_ROM 0
+
+#define MPHY_FAST_RX_AFE_CAL BIT(2)
+#define MPHY_FW_CALIB_CFG_VAL BIT(8)
+
+#define MPHY_RX_OVRD_EN BIT(3)
+#define MPHY_RX_OVRD_VAL BIT(2)
+#define MPHY_RX_ACK_MASK BIT(0)
+
+#define TIMEOUT_MICROSEC 1000000
+
+struct ufs_versal2_host {
+ struct ufs_hba *hba;
+ struct reset_control *rstc;
+ struct reset_control *rstphy;
+ u32 phy_mode;
+ unsigned long host_clk;
+ u8 attcompval0;
+ u8 attcompval1;
+ u8 ctlecompval0;
+ u8 ctlecompval1;
+};
+
+static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val)
+{
+ static struct ufshcd_dme_attr_val phy_write_attrs[] = {
+ { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL },
+ { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+ };
+
+ phy_write_attrs[0].mib_val = (u8)addr;
+ phy_write_attrs[1].mib_val = (u8)(addr >> 8);
+ phy_write_attrs[2].mib_val = (u8)val;
+ phy_write_attrs[3].mib_val = (u8)(val >> 8);
+
+ return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs));
+}
+
+static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val)
+{
+ u32 mib_val;
+ int ret;
+ static struct ufshcd_dme_attr_val phy_read_attrs[] = {
+ { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL },
+ { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+ };
+
+ phy_read_attrs[0].mib_val = (u8)addr;
+ phy_read_attrs[1].mib_val = (u8)(addr >> 8);
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs));
+ if (ret)
+ return ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val);
+ if (ret)
+ return ret;
+
+ *val = mib_val;
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val);
+ if (ret)
+ return ret;
+
+ *val |= (mib_val << 8);
+
+ return 0;
+}
+
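These helpers split the 16-bit PHY register address and value across LSB/MSB DME attributes. Worked example (invented values): writing val = 0xBEEF to addr = 0x1234 sets CBCREGADDRLSB = 0x34, CBCREGADDRMSB = 0x12, CBCREGWRLSB = 0xEF and CBCREGWRMSB = 0xBE, then latches the update through VS_MPHYCFGUPDT.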
+static int ufs_versal2_enable_phy(struct ufs_hba *hba)
+{
+ u32 offset, reg;
+ int ret;
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
+ if (ret)
+ return ret;
+
+ /* Check Tx/Rx FSM states */
+ for (offset = 0; offset < 2; offset++) {
+ u32 time_left, mibsel;
+
+ time_left = TIMEOUT_MICROSEC;
+ mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset));
+ do {
+ ret = ufshcd_dme_get(hba, mibsel, &reg);
+ if (ret)
+ return ret;
+
+ if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP ||
+ reg == TX_STATE_LSBURST)
+ break;
+
+ time_left--;
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Invalid Tx FSM state.\n");
+ return -ETIMEDOUT;
+ }
+
+ time_left = TIMEOUT_MICROSEC;
+ mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset));
+ do {
+ ret = ufshcd_dme_get(hba, mibsel, &reg);
+ if (ret)
+ return ret;
+
+ if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP ||
+ reg == RX_STATE_LSBURST)
+ break;
+
+ time_left--;
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Invalid Rx FSM state.\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_versal2_setup_phy(struct ufs_hba *hba)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ int ret;
+ u32 reg;
+
+ /* Bypass RX-AFE offset calibrations (ATT/CTLE) */
+ ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FAST_RX_AFE_CAL;
+ ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg);
+ if (ret)
+ return ret;
+
+ ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FAST_RX_AFE_CAL;
+ ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg);
+ if (ret)
+ return ret;
+
+ /* Program ATT and CTLE compensation values */
+ if (host->attcompval0) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0);
+ if (ret)
+ return ret;
+ }
+
+ if (host->attcompval1) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1);
+ if (ret)
+ return ret;
+ }
+
+ if (host->ctlecompval0) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0);
+ if (ret)
+ return ret;
+ }
+
+ if (host->ctlecompval1) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1);
+ if (ret)
+ return ret;
+ }
+
+ ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FW_CALIB_CFG_VAL;
+ ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg);
+ if (ret)
+ return ret;
+
+ ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FW_CALIB_CFG_VAL;
+ return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg);
+}
+
+static int ufs_versal2_phy_init(struct ufs_hba *hba)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ u32 time_left;
+ bool is_ready;
+ int ret;
+ static const struct ufshcd_dme_attr_val rmmi_attrs[] = {
+ { UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL },
+ { UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL },
+ { UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL },
+ { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+ };
+
+ /* Wait for Tx/Rx config_rdy */
+ time_left = TIMEOUT_MICROSEC;
+ do {
+ time_left--;
+ ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready);
+ if (ret)
+ return ret;
+
+ if (!is_ready)
+ break;
+
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Tx/Rx configuration signal busy.\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs));
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(host->rstphy);
+ if (ret) {
+ dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for SRAM init done */
+ time_left = TIMEOUT_MICROSEC;
+ do {
+ time_left--;
+ ret = zynqmp_pm_is_sram_init_done(&is_ready);
+ if (ret)
+ return ret;
+
+ if (is_ready)
+ break;
+
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "SRAM initialization failed.\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = ufs_versal2_setup_phy(hba);
+ if (ret)
+ return ret;
+
+ return ufs_versal2_enable_phy(hba);
+}
+
+static int ufs_versal2_init(struct ufs_hba *hba)
+{
+ struct ufs_versal2_host *host;
+ struct device *dev = hba->dev;
+ struct ufs_clk_info *clki;
+ int ret;
+ u32 cal;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ host->phy_mode = UFSHCD_DWC_PHY_MODE_ROM;
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core"))
+ host->host_clk = clk_get_rate(clki->clk);
+ }
+
+ host->rstc = devm_reset_control_get_exclusive(dev, "host");
+ if (IS_ERR(host->rstc)) {
+ dev_err(dev, "failed to get reset ctrl: host\n");
+ return PTR_ERR(host->rstc);
+ }
+
+ host->rstphy = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(host->rstphy)) {
+ dev_err(dev, "failed to get reset ctrl: phy\n");
+ return PTR_ERR(host->rstphy);
+ }
+
+ ret = reset_control_assert(host->rstc);
+ if (ret) {
+ dev_err(hba->dev, "host reset assert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_assert(host->rstphy);
+ if (ret) {
+ dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = zynqmp_pm_set_sram_bypass();
+ if (ret) {
+ dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(host->rstc);
+ if (ret) {
+ dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = zynqmp_pm_get_ufs_calibration_values(&cal);
+ if (ret) {
+ dev_err(dev, "failed to read calibration values\n");
+ return ret;
+ }
+
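+ /* The 32-bit calibration word packs ATT0, ATT1, CTLE0 and CTLE1, LSB first */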
+ host->attcompval0 = (u8)cal;
+ host->attcompval1 = (u8)(cal >> 8);
+ host->ctlecompval0 = (u8)(cal >> 16);
+ host->ctlecompval1 = (u8)(cal >> 24);
+
+ hba->quirks |= UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
+
+ return 0;
+}
+
+static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int ret = 0;
+
+ if (status == PRE_CHANGE) {
+ ret = ufs_versal2_phy_init(hba);
+ if (ret)
+ dev_err(hba->dev, "Phy init failed (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ int ret = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
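+ /*
+ * DWC_UFS_REG_HCLKDIV holds the number of core clock cycles per
+ * microsecond, i.e. the core clock rate in MHz.
+ */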
+ if (host->host_clk)
+ ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV);
+
+ break;
+ case POST_CHANGE:
+ ret = ufshcd_dwc_link_startup_notify(hba, status);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req)
+{
+ u32 time_left, reg, lane;
+ int ret;
+
+ for (lane = 0; lane < activelanes; lane++) {
+ time_left = TIMEOUT_MICROSEC;
+ ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_RX_OVRD_EN;
+ if (rx_req)
+ reg |= MPHY_RX_OVRD_VAL;
+ else
+ reg &= ~MPHY_RX_OVRD_VAL;
+
+ ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+ if (ret)
+ return ret;
+
+ do {
+ ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg);
+ if (ret)
+ return ret;
+
+ reg &= MPHY_RX_ACK_MASK;
+ if (reg == rx_req)
+ break;
+
+ time_left--;
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Invalid Rx Ack value.\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ u32 lane, reg, rate = 0;
+ int ret = 0;
+
+ if (status == PRE_CHANGE) {
+ memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
+
+ /* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
+ if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
+ !host->ctlecompval1) {
+ dev_req_params->pwr_rx = SLOW_MODE;
+ dev_req_params->pwr_tx = SLOW_MODE;
+ return 0;
+ }
+
+ if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE)
+ return 0;
+
+ if (dev_req_params->hs_rate == PA_HS_MODE_B)
+ rate = 1;
+
+ /* Select the rate */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
+ if (ret)
+ return ret;
+
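+ /* Handshake the rate change: assert rx_req on every active lane, then release it */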
+ ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1);
+ if (ret)
+ return ret;
+
+ ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0);
+ if (ret)
+ return ret;
+
+ /* Remove rx_req override */
+ for (lane = 0; lane < dev_req_params->lane_tx; lane++) {
+ ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+ if (ret)
+ return ret;
+
+ reg &= ~MPHY_RX_OVRD_EN;
+ ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+ if (ret)
+ return ret;
+ }
+
+ if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2)
+ ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+
+ return ret;
+}
+
+static struct ufs_hba_variant_ops ufs_versal2_hba_vops = {
+ .name = "ufs-versal2-pltfm",
+ .init = ufs_versal2_init,
+ .link_startup_notify = ufs_versal2_link_startup_notify,
+ .hce_enable_notify = ufs_versal2_hce_enable_notify,
+ .pwr_change_notify = ufs_versal2_pwr_change_notify,
+};
+
+static const struct of_device_id ufs_versal2_pltfm_match[] = {
+ {
+ .compatible = "amd,versal2-ufs",
+ .data = &ufs_versal2_hba_vops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match);
+
+static int ufs_versal2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* Perform generic probe */
+ ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops);
+ if (ret)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret);
+
+ return ret;
+}
+
+static void ufs_versal2_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ ufshcd_remove(hba);
+}
+
+static const struct dev_pm_ops ufs_versal2_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+};
+
+static struct platform_driver ufs_versal2_pltfm = {
+ .probe = ufs_versal2_probe,
+ .remove = ufs_versal2_remove,
+ .driver = {
+ .name = "ufshcd-versal2",
+ .pm = &ufs_versal2_pm_ops,
+ .of_match_table = ufs_versal2_pltfm_match,
+ },
+};
+
+module_platform_driver(ufs_versal2_pltfm);
+
+MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>");
+MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 758a393a9de1..ecbbf52bf734 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -41,8 +41,7 @@ static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -280,12 +279,21 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
+ if (host->legacy_ip_ver)
+ return 0;
+
/* DDR_EN setting */
if (host->ip_ver >= IP_VER_MT6989) {
ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
0x453000, REG_UFS_MMIO_OPT_CTRL_0);
}
+ if (host->ip_ver >= IP_VER_MT6991_A0) {
+ /* Enable multi-rtt */
+ ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0);
+ /* Enable random performance improvement */
+ ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0);
+ }
}
return 0;
@@ -405,7 +413,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+ if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) {
ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
@@ -422,6 +430,7 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
u64 timeout, time_checked;
u32 val, sm;
bool wait_idle;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
/* cannot use plain ktime_get() in suspend */
timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
@@ -432,8 +441,13 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
do {
time_checked = ktime_get_mono_fast_ns();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
+ if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ val = val >> 16;
+ }
sm = val & 0x1f;
@@ -465,13 +479,20 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
{
ktime_t timeout, time_checked;
u32 val;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
- val = val >> 28;
+
+ if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ val = val >> 28;
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ val = val >> 24;
+ }
if (val == state)
return 0;
@@ -1109,18 +1130,6 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
}
}
-/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufs_mtk_us_to_ahit(unsigned int timer)
-{
- unsigned int scale;
-
- for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
- timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
- return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
-}
-
static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
{
unsigned int us;
@@ -1143,7 +1152,7 @@ static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
break;
}
- hba->ahit = ufs_mtk_us_to_ahit(us);
+ hba->ahit = ufshcd_us_to_ahit(us);
}
ufs_mtk_setup_clk_gating(hba);
@@ -1332,6 +1341,36 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
return true;
}
+static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
+{
+ int i;
+ u32 value;
+ u32 cnt, att, min;
+ struct attr_min {
+ u32 attr;
+ u32 min_value;
+ } pa_min_sync_length[] = {
+ {PA_TXHSG1SYNCLENGTH, 0x48},
+ {PA_TXHSG2SYNCLENGTH, 0x48},
+ {PA_TXHSG3SYNCLENGTH, 0x48},
+ {PA_TXHSG4SYNCLENGTH, 0x48},
+ {PA_TXHSG5SYNCLENGTH, 0x48}
+ };
+
+ cnt = ARRAY_SIZE(pa_min_sync_length);
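+ /* Raise local and peer HS sync lengths on all gears to the minimum value */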
+ for (i = 0; i < cnt; i++) {
+ att = pa_min_sync_length[i].attr;
+ min = pa_min_sync_length[i].min_value;
+ ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value);
+ if (value < min)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(att), min);
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), &value);
+ if (value < min)
+ ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min);
+ }
+}
+
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
@@ -1355,6 +1394,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+ ufs_mtk_adjust_sync_length(hba);
+
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
@@ -1619,14 +1660,26 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
int err;
+ u32 val;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
err = ufshcd_hba_enable(hba);
if (err)
return err;
err = ufs_mtk_unipro_set_lpm(hba, false);
- if (err)
+ if (err) {
+ if (host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ }
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
+ val = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
return err;
+ }
err = ufshcd_uic_hibern8_exit(hba);
if (err)
@@ -1744,6 +1797,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
{
int err;
struct arm_smccc_res res;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
@@ -1773,6 +1827,15 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
ufs_mtk_sram_pwr_ctrl(false, res);
+ /* Release pm_qos/clk if in scale-up mode during suspend */
+ if (ufshcd_is_clkscaling_supported(hba) && host->clk_scale_up) {
+ ufshcd_pm_qos_update(hba, false);
+ _ufs_mtk_clk_scale(hba, false);
+ } else if (!ufshcd_is_clkscaling_supported(hba) &&
+ hba->pwr_info.gear_rx >= UFS_HS_G5) {
+ _ufs_mtk_clk_scale(hba, false);
+ }
+
return 0;
fail:
/*
@@ -1788,6 +1851,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
struct arm_smccc_res res;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
ufs_mtk_dev_vreg_set_lpm(hba, false);
@@ -1798,6 +1862,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (err)
goto fail;
+ /* Request pm_qos/clk if in scale-up mode after resume */
+ if (ufshcd_is_clkscaling_supported(hba) && host->clk_scale_up) {
+ ufshcd_pm_qos_update(hba, true);
+ _ufs_mtk_clk_scale(hba, true);
+ } else if (!ufshcd_is_clkscaling_supported(hba) &&
+ hba->pwr_info.gear_rx >= UFS_HS_G5) {
+ _ufs_mtk_clk_scale(hba, true);
+ }
+
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
@@ -1889,15 +1962,13 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
- if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
- (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
hba->vreg_info.vcc->always_on = true;
/*
* VCC will be kept always-on thus we don't
- * need any delay during regulator operations
+ * need any delay before putting device's VCC in LPM mode.
*/
- hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
}
ufs_mtk_vreg_fix_vcc(hba);
@@ -2373,6 +2444,11 @@ static int ufs_mtk_system_suspend(struct device *dev)
struct arm_smccc_res res;
int ret;
+ if (hba->shutting_down) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ret = ufshcd_system_suspend(dev);
if (ret)
goto out;
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index dfbf78bd8664..9747277f11e8 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -20,6 +20,9 @@
#define MCQ_MULTI_INTR_EN BIT(2)
#define MCQ_CMB_INTR_EN BIT(3)
#define MCQ_AH8 BIT(4)
+#define MON_EN BIT(5)
+#define MRTT_EN BIT(25)
+#define RDN_PFM_IMPV_DIS BIT(28)
#define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
@@ -28,6 +31,7 @@
*/
#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_UFS_MMIO_OTSD_CTRL 0x14C
#define REG_UFS_MMIO_OPT_CTRL_0 0x160
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index eba0e6617483..8d119b3223cb 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1037,9 +1037,6 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
- { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
- .model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
{ .wmanufacturerid = UFS_VENDOR_WDC,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
diff --git a/drivers/ufs/host/ufs-rockchip.c b/drivers/ufs/host/ufs-rockchip.c
index 8754085dd0cc..7fff34513a60 100644
--- a/drivers/ufs/host/ufs-rockchip.c
+++ b/drivers/ufs/host/ufs-rockchip.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -20,9 +21,17 @@
#include "ufshcd-pltfrm.h"
#include "ufs-rockchip.h"
+static void ufs_rockchip_controller_reset(struct ufs_rockchip_host *host)
+{
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+}
+
static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
int err = 0;
if (status == POST_CHANGE) {
@@ -37,6 +46,9 @@ static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
return ufshcd_vops_phy_initialization(hba);
}
+ /* PRE_CHANGE */
+ ufs_rockchip_controller_reset(host);
+
return 0;
}
@@ -156,9 +168,7 @@ static int ufs_rockchip_common_init(struct ufs_hba *hba)
return dev_err_probe(dev, PTR_ERR(host->rst),
"failed to get reset control\n");
- reset_control_assert(host->rst);
- udelay(1);
- reset_control_deassert(host->rst);
+ ufs_rockchip_controller_reset(host);
host->ref_out_clk = devm_clk_get_enabled(dev, "ref_out");
if (IS_ERR(host->ref_out_clk))
@@ -282,9 +292,7 @@ static int ufs_rockchip_runtime_resume(struct device *dev)
return err;
}
- reset_control_assert(host->rst);
- udelay(1);
- reset_control_deassert(host->rst);
+ ufs_rockchip_controller_reset(host);
return ufshcd_runtime_resume(dev);
}
diff --git a/drivers/ufs/host/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h
index ad91ea56662c..c618bb914904 100644
--- a/drivers/ufs/host/ufshcd-dwc.h
+++ b/drivers/ufs/host/ufshcd-dwc.h
@@ -12,6 +12,52 @@
#include <ufs/ufshcd.h>
+/* RMMI Attributes */
+#define CBREFCLKCTRL2 0x8132
+#define CBCRCTRL 0x811F
+#define CBC10DIRECTCONF2 0x810E
+#define CBRATESEL 0x8114
+#define CBCREGADDRLSB 0x8116
+#define CBCREGADDRMSB 0x8117
+#define CBCREGWRLSB 0x8118
+#define CBCREGWRMSB 0x8119
+#define CBCREGRDLSB 0x811A
+#define CBCREGRDMSB 0x811B
+#define CBCREGRDWRSEL 0x811C
+
+#define CBREFREFCLK_GATE_OVR_EN BIT(7)
+
+/* M-PHY Attributes */
+#define MTX_FSM_STATE 0x41
+#define MRX_FSM_STATE 0xC1
+
+/* M-PHY registers */
+#define RX_OVRD_IN_1(n) (0x3006 + ((n) * 0x100))
+#define RX_PCS_OUT(n) (0x300F + ((n) * 0x100))
+#define FAST_FLAGS(n) (0x401C + ((n) * 0x100))
+#define RX_AFE_ATT_IDAC(n) (0x4000 + ((n) * 0x100))
+#define RX_AFE_CTLE_IDAC(n) (0x4001 + ((n) * 0x100))
+#define FW_CALIB_CCFG(n) (0x404D + ((n) * 0x100))
+
+/* Tx/Rx FSM state */
+enum rx_fsm_state {
+ RX_STATE_DISABLED = 0,
+ RX_STATE_HIBERN8 = 1,
+ RX_STATE_SLEEP = 2,
+ RX_STATE_STALL = 3,
+ RX_STATE_LSBURST = 4,
+ RX_STATE_HSBURST = 5,
+};
+
+enum tx_fsm_state {
+ TX_STATE_DISABLED = 0,
+ TX_STATE_HIBERN8 = 1,
+ TX_STATE_SLEEP = 2,
+ TX_STATE_STALL = 3,
+ TX_STATE_LSBURST = 4,
+ TX_STATE_HSBURST = 5,
+};
+
struct ufshcd_dme_attr_val {
u32 attr_sel;
u32 mib_val;
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 4c36a3e409be..cb6ff15a21db 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/fb.h>
@@ -65,7 +66,7 @@ struct gbefb_par {
static unsigned int gbe_mem_size = CONFIG_FB_GBE_MEM * 1024*1024;
static void *gbe_mem;
static dma_addr_t gbe_dma_addr;
-static unsigned long gbe_mem_phys;
+static phys_addr_t gbe_mem_phys;
static struct {
uint16_t *cpu;
@@ -1183,7 +1184,7 @@ static int gbefb_probe(struct platform_device *p_dev)
goto out_release_mem_region;
}
- gbe_mem_phys = (unsigned long) gbe_dma_addr;
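+ /* A dma_addr_t is not necessarily a CPU physical address; translate it explicitly */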
+ gbe_mem_phys = dma_to_phys(&p_dev->dev, gbe_dma_addr);
}
par = info->par;
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 15a82c6b609e..1ee0a1efa18e 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -704,7 +704,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
info->var = var;
if (gxt4500_set_par(info)) {
- printk(KERN_ERR "gxt4500: cannot set video mode\n");
+ dev_err(&pdev->dev, "cannot set video mode\n");
goto err_free_cmap;
}
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index d73a795fe1be..10b914a24114 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1012,7 +1012,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
var->bits_per_pixel);
vidmem = line_length * info->var.yres;
if (vxres < var->xres) {
- printk("i810fb: required video memory, "
+ dev_info(&par->dev->dev, "required video memory, "
"%d bytes, for %dx%d-%d (virtual) "
"is out of range\n",
vidmem, vxres, vyres,
@@ -1067,9 +1067,9 @@ static int i810_check_params(struct fb_var_screeninfo *var,
|(info->monspecs.hfmax-HFMAX)
|(info->monspecs.vfmin-VFMIN)
|(info->monspecs.vfmax-VFMAX);
- printk("i810fb: invalid video mode%s\n",
- default_sync ? "" : ". Specifying "
- "vsyncN/hsyncN parameters may help");
+ dev_err(&par->dev->dev, "invalid video mode%s\n",
+ default_sync ? "" : ". Specifying "
+ "vsyncN/hsyncN parameters may help");
retval = -EINVAL;
}
}
@@ -1674,19 +1674,19 @@ static int i810_alloc_agp_mem(struct fb_info *info)
size = par->fb.size + par->iring.size;
if (!(bridge = agp_backend_acquire(par->dev))) {
- printk("i810fb_alloc_fbmem: cannot acquire agpgart\n");
+ dev_warn(&par->dev->dev, "cannot acquire agpgart\n");
return -ENODEV;
}
if (!(par->i810_gtt.i810_fb_memory =
agp_allocate_memory(bridge, size >> 12, AGP_NORMAL_MEMORY))) {
- printk("i810fb_alloc_fbmem: can't allocate framebuffer "
+ dev_warn(&par->dev->dev, "can't allocate framebuffer "
"memory\n");
agp_backend_release(bridge);
return -ENOMEM;
}
if (agp_bind_memory(par->i810_gtt.i810_fb_memory,
par->fb.offset)) {
- printk("i810fb_alloc_fbmem: can't bind framebuffer memory\n");
+ dev_warn(&par->dev->dev, "can't bind framebuffer memory\n");
agp_backend_release(bridge);
return -EBUSY;
}
@@ -1694,14 +1694,14 @@ static int i810_alloc_agp_mem(struct fb_info *info)
if (!(par->i810_gtt.i810_cursor_memory =
agp_allocate_memory(bridge, par->cursor_heap.size >> 12,
AGP_PHYSICAL_MEMORY))) {
- printk("i810fb_alloc_cursormem: can't allocate "
+ dev_warn(&par->dev->dev, "can't allocate "
"cursor memory\n");
agp_backend_release(bridge);
return -ENOMEM;
}
if (agp_bind_memory(par->i810_gtt.i810_cursor_memory,
par->cursor_heap.offset)) {
- printk("i810fb_alloc_cursormem: cannot bind cursor memory\n");
+ dev_warn(&par->dev->dev, "cannot bind cursor memory\n");
agp_backend_release(bridge);
return -EBUSY;
}
@@ -1844,7 +1844,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
int err;
if ((err = pci_enable_device(par->dev))) {
- printk("i810fb_init: cannot enable device\n");
+ dev_err(&par->dev->dev, "cannot enable device\n");
return err;
}
par->res_flags |= PCI_DEVICE_ENABLED;
@@ -1859,14 +1859,14 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
par->mmio_start_phys = pci_resource_start(par->dev, 0);
}
if (!par->aperture.size) {
- printk("i810fb_init: device is disabled\n");
+ dev_warn(&par->dev->dev, "device is disabled\n");
return -ENOMEM;
}
if (!request_mem_region(par->aperture.physical,
par->aperture.size,
i810_pci_list[entry->driver_data])) {
- printk("i810fb_init: cannot request framebuffer region\n");
+ dev_warn(&par->dev->dev, "cannot request framebuffer region\n");
return -ENODEV;
}
par->res_flags |= FRAMEBUFFER_REQ;
@@ -1874,14 +1874,14 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
par->aperture.virtual = ioremap_wc(par->aperture.physical,
par->aperture.size);
if (!par->aperture.virtual) {
- printk("i810fb_init: cannot remap framebuffer region\n");
+ dev_warn(&par->dev->dev, "cannot remap framebuffer region\n");
return -ENODEV;
}
if (!request_mem_region(par->mmio_start_phys,
MMIO_SIZE,
i810_pci_list[entry->driver_data])) {
- printk("i810fb_init: cannot request mmio region\n");
+ dev_warn(&par->dev->dev, "cannot request mmio region\n");
return -ENODEV;
}
par->res_flags |= MMIO_REQ;
@@ -1889,7 +1889,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
par->mmio_start_virtual = ioremap(par->mmio_start_phys,
MMIO_SIZE);
if (!par->mmio_start_virtual) {
- printk("i810fb_init: cannot remap mmio region\n");
+ dev_warn(&par->dev->dev, "cannot remap mmio region\n");
return -ENODEV;
}
@@ -1921,12 +1921,12 @@ static void i810fb_find_init_mode(struct fb_info *info)
}
if (!err)
- printk("i810fb_init_pci: DDC probe successful\n");
+ dev_info(&par->dev->dev, "DDC probe successful\n");
fb_edid_to_monspecs(par->edid, specs);
if (specs->modedb == NULL)
- printk("i810fb_init_pci: Unable to get Mode Database\n");
+ dev_info(&par->dev->dev, "Unable to get Mode Database\n");
fb_videomode_to_modelist(specs->modedb, specs->modedb_len,
&info->modelist);
@@ -2072,7 +2072,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
if (err < 0) {
i810fb_release_resource(info, par);
- printk("i810fb_init: cannot register framebuffer device\n");
+ dev_warn(&par->dev->dev, "cannot register framebuffer device\n");
return err;
}
@@ -2084,10 +2084,10 @@ static int i810fb_init_pci(struct pci_dev *dev,
vfreq = hfreq/(info->var.yres + info->var.upper_margin +
info->var.vsync_len + info->var.lower_margin);
- printk("I810FB: fb%d : %s v%d.%d.%d%s\n"
- "I810FB: Video RAM : %dK\n"
- "I810FB: Monitor : H: %d-%d KHz V: %d-%d Hz\n"
- "I810FB: Mode : %dx%d-%dbpp@%dHz\n",
+ dev_info(&par->dev->dev, "fb%d : %s v%d.%d.%d%s\n"
+ "Video RAM : %dK\n"
+ "Monitor : H: %d-%d KHz V: %d-%d Hz\n"
+ "Mode : %dx%d-%dbpp@%dHz\n",
info->node,
i810_pci_list[entry->driver_data],
VERSION_MAJOR, VERSION_MINOR, VERSION_TEENIE, BRANCH_VERSION,
@@ -2137,7 +2137,7 @@ static void i810fb_remove_pci(struct pci_dev *dev)
unregister_framebuffer(info);
i810fb_release_resource(info, par);
- printk("cleanup_module: unloaded i810 framebuffer device\n");
+ dev_info(&par->dev->dev, "unloaded i810 framebuffer device\n");
}
#ifndef MODULE
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index b96a8a96bce8..e418eee825fb 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -419,12 +419,12 @@ static int pxafb_adjust_timing(struct pxafb_info *fbi,
var->yres = max_t(int, var->yres, MIN_YRES);
if (!(fbi->lccr0 & LCCR0_LCDT)) {
- clamp_val(var->hsync_len, 1, 64);
- clamp_val(var->vsync_len, 1, 64);
- clamp_val(var->left_margin, 1, 255);
- clamp_val(var->right_margin, 1, 255);
- clamp_val(var->upper_margin, 1, 255);
- clamp_val(var->lower_margin, 1, 255);
+ var->hsync_len = clamp(var->hsync_len, 1, 64);
+ var->vsync_len = clamp(var->vsync_len, 1, 64);
+ var->left_margin = clamp(var->left_margin, 1, 255);
+ var->right_margin = clamp(var->right_margin, 1, 255);
+ var->upper_margin = clamp(var->upper_margin, 1, 255);
+ var->lower_margin = clamp(var->lower_margin, 1, 255);
}
/* make sure each line is aligned on word boundary */
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index aa6cc0a8151a..83dd31fa1fab 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -680,7 +680,7 @@ static int ssd1307fb_probe(struct i2c_client *client)
if (!ssd1307fb_defio) {
dev_err(dev, "Couldn't allocate deferred io.\n");
ret = -ENOMEM;
- goto fb_alloc_error;
+ goto fb_defio_error;
}
ssd1307fb_defio->delay = HZ / refreshrate;
@@ -757,6 +757,8 @@ regulator_enable_error:
regulator_disable(par->vbat_reg);
reset_oled_error:
fb_deferred_io_cleanup(info);
+fb_defio_error:
+ __free_pages(vmem, get_order(vmem_size));
fb_alloc_error:
framebuffer_release(info);
return ret;
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index f9a0085ad72b..ca9e84e8d860 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -428,7 +428,7 @@ static int tcx_probe(struct platform_device *op)
j = i;
break;
}
- par->mmap_map[i].poff = op->resource[j].start;
+ par->mmap_map[i].poff = op->resource[j].start - info->fix.smem_start;
}
info->fbops = &tcx_ops;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 516cf2a18757..17b7253b8fbe 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1631,7 +1631,7 @@ static int trident_pci_probe(struct pci_dev *dev,
}
if (noaccel) {
- printk(KERN_DEBUG "disabling acceleration\n");
+ dev_dbg(&dev->dev, "disabling acceleration\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
info->pixmap.scan_align = 1;
}
@@ -1693,7 +1693,7 @@ static int trident_pci_probe(struct pci_dev *dev,
info->var.activate |= FB_ACTIVATE_NOW;
info->device = &dev->dev;
if (register_framebuffer(info) < 0) {
- printk(KERN_ERR "tridentfb: could not register framebuffer\n");
+ dev_err(&dev->dev, "could not register framebuffer\n");
fb_dealloc_cmap(&info->cmap);
err = -EINVAL;
goto out_unmap2;
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index a81df8865143..f135033c22fb 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -314,8 +314,8 @@ static int vesafb_probe(struct platform_device *dev)
#endif
if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
- printk(KERN_WARNING
- "vesafb: cannot reserve video memory at 0x%lx\n",
+ dev_warn(&dev->dev,
+ "cannot reserve video memory at 0x%lx\n",
vesafb_fix.smem_start);
/* We cannot make this fatal. Sometimes this comes from magic
spaces our resource handlers simply don't know about */
@@ -333,12 +333,12 @@ static int vesafb_probe(struct platform_device *dev)
par->base = si->lfb_base;
par->size = size_total;
- printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ dev_info(&dev->dev, "mode is %dx%dx%d, linelength=%d, pages=%d\n",
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel,
vesafb_fix.line_length, si->pages);
if (si->vesapm_seg) {
- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+ dev_info(&dev->dev, "protected mode interface info at %04x:%04x\n",
si->vesapm_seg, si->vesapm_off);
}
@@ -352,9 +352,10 @@ static int vesafb_probe(struct platform_device *dev)
pmi_base = (unsigned short *)phys_to_virt(pmi_phys);
pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
- printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+ dev_info(&dev->dev, "pmi: set display start = %p, set palette = %p\n",
+ pmi_start, pmi_pal);
if (pmi_base[3]) {
- printk(KERN_INFO "vesafb: pmi: ports = ");
+ dev_info(&dev->dev, "pmi: ports = ");
for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++)
printk("%x ", pmi_base[i]);
printk("\n");
@@ -365,14 +366,14 @@ static int vesafb_probe(struct platform_device *dev)
* Rules are: we have to set up a descriptor for the requested
* memory area and pass it in the ES register to the BIOS function.
*/
- printk(KERN_INFO "vesafb: can't handle memory requests, pmi disabled\n");
+ dev_info(&dev->dev, "can't handle memory requests, pmi disabled\n");
ypan = pmi_setpal = 0;
}
}
}
if (vesafb_defined.bits_per_pixel == 8 && !pmi_setpal && !vga_compat) {
- printk(KERN_WARNING "vesafb: hardware palette is unchangeable,\n"
+ dev_warn(&dev->dev, "hardware palette is unchangeable,\n"
" colors may be incorrect\n");
vesafb_fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
}
@@ -380,10 +381,10 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_defined.xres_virtual = vesafb_defined.xres;
vesafb_defined.yres_virtual = vesafb_fix.smem_len / vesafb_fix.line_length;
if (ypan && vesafb_defined.yres_virtual > vesafb_defined.yres) {
- printk(KERN_INFO "vesafb: scrolling: %s using protected mode interface, yres_virtual=%d\n",
+ dev_info(&dev->dev, "scrolling: %s using protected mode interface, yres_virtual=%d\n",
(ypan > 1) ? "ywrap" : "ypan",vesafb_defined.yres_virtual);
} else {
- printk(KERN_INFO "vesafb: scrolling: redraw\n");
+ dev_info(&dev->dev, "scrolling: redraw\n");
vesafb_defined.yres_virtual = vesafb_defined.yres;
ypan = 0;
}
@@ -410,7 +411,7 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_defined.bits_per_pixel;
}
- printk(KERN_INFO "vesafb: %s: "
+ dev_info(&dev->dev, "%s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
(vesafb_defined.bits_per_pixel > 8) ?
"Truecolor" : (vga_compat || pmi_setpal) ?
@@ -453,14 +454,14 @@ static int vesafb_probe(struct platform_device *dev)
}
if (!info->screen_base) {
- printk(KERN_ERR
- "vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+ dev_err(&dev->dev,
+ "abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
vesafb_fix.smem_len, vesafb_fix.smem_start);
err = -EIO;
goto err_release_region;
}
- printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
+ dev_info(&dev->dev, "framebuffer at 0x%lx, mapped to 0x%p, "
"using %dk, total %dk\n",
vesafb_fix.smem_start, info->screen_base,
size_remap/1024, size_total/1024);
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index eedab14c7d51..6b81337a4909 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1319,7 +1319,12 @@ static int vga16fb_probe(struct platform_device *dev)
if (ret)
return ret;
- printk(KERN_DEBUG "vga16fb: initializing\n");
+ dev_dbg(&dev->dev, "initializing\n");
+ if (!request_mem_region(vga16fb_fix.smem_start, vga16fb_fix.smem_len,
+ "vga16b")) {
+ dev_err(&dev->dev, "cannot reserve video memory at 0x%lx\n",
+ vga16fb_fix.smem_start);
+ }
info = framebuffer_alloc(sizeof(struct vga16fb_par), &dev->dev);
if (!info) {
@@ -1331,12 +1336,12 @@ static int vga16fb_probe(struct platform_device *dev)
info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS_BASE, 0);
if (!info->screen_base) {
- printk(KERN_ERR "vga16fb: unable to map device\n");
+ dev_err(&dev->dev, "unable to map device\n");
ret = -ENOMEM;
goto err_ioremap;
}
- printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
+ dev_info(&dev->dev, "mapped to 0x%p\n", info->screen_base);
par = info->par;
par->isVGA = screen_info_video_type(si) == VIDEO_TYPE_VGAC;
@@ -1364,13 +1369,13 @@ static int vga16fb_probe(struct platform_device *dev)
i = (info->var.bits_per_pixel == 8) ? 256 : 16;
ret = fb_alloc_cmap(&info->cmap, i, 0);
if (ret) {
- printk(KERN_ERR "vga16fb: unable to allocate colormap\n");
+ dev_err(&dev->dev, "unable to allocate colormap\n");
ret = -ENOMEM;
goto err_alloc_cmap;
}
if (vga16fb_check_var(&info->var, info)) {
- printk(KERN_ERR "vga16fb: unable to validate variable\n");
+ dev_err(&dev->dev, "unable to validate variable\n");
ret = -EINVAL;
goto err_check_var;
}
@@ -1381,7 +1386,7 @@ static int vga16fb_probe(struct platform_device *dev)
if (ret)
goto err_check_var;
if (register_framebuffer(info) < 0) {
- printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
+ dev_err(&dev->dev, "unable to register framebuffer\n");
ret = -EINVAL;
goto err_check_var;
}
@@ -1398,6 +1403,8 @@ static int vga16fb_probe(struct platform_device *dev)
err_ioremap:
framebuffer_release(info);
err_fb_alloc:
+ release_mem_region(vga16fb_fix.smem_start,
+ vga16fb_fix.smem_len);
return ret;
}
@@ -1407,6 +1414,8 @@ static void vga16fb_remove(struct platform_device *dev)
if (info)
unregister_framebuffer(info);
+ release_mem_region(vga16fb_fix.smem_start,
+ vga16fb_fix.smem_len);
}
static const struct platform_device_id vga16fb_driver_id_table[] = {
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index d8c848cf09a6..52eb7e4ba71f 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -47,6 +47,6 @@ source "drivers/virt/nitro_enclaves/Kconfig"
source "drivers/virt/acrn/Kconfig"
-source "drivers/virt/coco/Kconfig"
-
endif
+
+source "drivers/virt/coco/Kconfig"
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index 819a97e8ba99..df1cfaf26c65 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -3,6 +3,7 @@
# Confidential computing related collateral
#
+if VIRT_DRIVERS
source "drivers/virt/coco/efi_secret/Kconfig"
source "drivers/virt/coco/pkvm-guest/Kconfig"
@@ -14,3 +15,7 @@ source "drivers/virt/coco/tdx-guest/Kconfig"
source "drivers/virt/coco/arm-cca-guest/Kconfig"
source "drivers/virt/coco/guest/Kconfig"
+endif
+
+config TSM
+ bool
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index f918bbb61737..cb52021912b3 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/
+obj-$(CONFIG_TSM) += tsm-core.o
obj-$(CONFIG_TSM_GUEST) += guest/
diff --git a/drivers/virt/coco/tsm-core.c b/drivers/virt/coco/tsm-core.c
new file mode 100644
index 000000000000..f027876a2f19
--- /dev/null
+++ b/drivers/virt/coco/tsm-core.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/tsm.h>
+#include <linux/pci.h>
+#include <linux/rwsem.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cleanup.h>
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+
+static struct class *tsm_class;
+static DECLARE_RWSEM(tsm_rwsem);
+static DEFINE_IDA(tsm_ida);
+
+static int match_id(struct device *dev, const void *data)
+{
+ struct tsm_dev *tsm_dev = container_of(dev, struct tsm_dev, dev);
+ int id = *(const int *)data;
+
+ return tsm_dev->id == id;
+}
+
+struct tsm_dev *find_tsm_dev(int id)
+{
+ struct device *dev = class_find_device(tsm_class, NULL, &id, match_id);
+
+ if (!dev)
+ return NULL;
+ return container_of(dev, struct tsm_dev, dev);
+}
+
+static struct tsm_dev *alloc_tsm_dev(struct device *parent)
+{
+ struct device *dev;
+ int id;
+
+ struct tsm_dev *tsm_dev __free(kfree) =
+ kzalloc(sizeof(*tsm_dev), GFP_KERNEL);
+ if (!tsm_dev)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&tsm_ida, GFP_KERNEL);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ tsm_dev->id = id;
+ dev = &tsm_dev->dev;
+ dev->parent = parent;
+ dev->class = tsm_class;
+ device_initialize(dev);
+
+ return no_free_ptr(tsm_dev);
+}
+
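+/*
+ * Register the optional PCI/TSM operations; on failure the freshly added
+ * device is unregistered again, hence the "_or_reset" naming.
+ */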
+static struct tsm_dev *tsm_register_pci_or_reset(struct tsm_dev *tsm_dev,
+ struct pci_tsm_ops *pci_ops)
+{
+ int rc;
+
+ if (!pci_ops)
+ return tsm_dev;
+
+ tsm_dev->pci_ops = pci_ops;
+ rc = pci_tsm_register(tsm_dev);
+ if (rc) {
+ dev_err(tsm_dev->dev.parent,
+ "PCI/TSM registration failure: %d\n", rc);
+ device_unregister(&tsm_dev->dev);
+ return ERR_PTR(rc);
+ }
+
+ /* Notify TSM userspace that PCI/TSM operations are now possible */
+ kobject_uevent(&tsm_dev->dev.kobj, KOBJ_CHANGE);
+ return tsm_dev;
+}
+
+struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *pci_ops)
+{
+ struct tsm_dev *tsm_dev __free(put_tsm_dev) = alloc_tsm_dev(parent);
+ struct device *dev;
+ int rc;
+
+ if (IS_ERR(tsm_dev))
+ return tsm_dev;
+
+ dev = &tsm_dev->dev;
+ rc = dev_set_name(dev, "tsm%d", tsm_dev->id);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = device_add(dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return tsm_register_pci_or_reset(no_free_ptr(tsm_dev), pci_ops);
+}
+EXPORT_SYMBOL_GPL(tsm_register);
+
+void tsm_unregister(struct tsm_dev *tsm_dev)
+{
+ if (tsm_dev->pci_ops)
+ pci_tsm_unregister(tsm_dev);
+ device_unregister(&tsm_dev->dev);
+}
+EXPORT_SYMBOL_GPL(tsm_unregister);
+
+/* must be invoked between tsm_register / tsm_unregister */
+int tsm_ide_stream_register(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_tsm *tsm = pdev->tsm;
+ struct tsm_dev *tsm_dev = tsm->tsm_dev;
+ int rc;
+
+ rc = sysfs_create_link(&tsm_dev->dev.kobj, &pdev->dev.kobj, ide->name);
+ if (rc)
+ return rc;
+
+ ide->tsm_dev = tsm_dev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_ide_stream_register);
+
+void tsm_ide_stream_unregister(struct pci_ide *ide)
+{
+ struct tsm_dev *tsm_dev = ide->tsm_dev;
+
+ ide->tsm_dev = NULL;
+ sysfs_remove_link(&tsm_dev->dev.kobj, ide->name);
+}
+EXPORT_SYMBOL_GPL(tsm_ide_stream_unregister);
+
+static void tsm_release(struct device *dev)
+{
+ struct tsm_dev *tsm_dev = container_of(dev, typeof(*tsm_dev), dev);
+
+ ida_free(&tsm_ida, tsm_dev->id);
+ kfree(tsm_dev);
+}
+
+static int __init tsm_init(void)
+{
+ tsm_class = class_create("tsm");
+ if (IS_ERR(tsm_class))
+ return PTR_ERR(tsm_class);
+
+ tsm_class->dev_release = tsm_release;
+ return 0;
+}
+module_init(tsm_init);
+
+static void __exit tsm_exit(void)
+{
+ class_destroy(tsm_class);
+}
+module_exit(tsm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TEE Security Manager Class Device");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 05008d937e40..d3b9df7d466b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -969,6 +969,14 @@ config RENESAS_WDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT).
+config RENESAS_WWDT
+ tristate "Renesas Window WWDT Watchdog"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for a window timer found in some
+ Renesas R-Car Gen3 and later SoCs.
+
config RENESAS_RZAWDT
tristate "Renesas RZ/A WDT Watchdog"
depends on ARCH_RENESAS || COMPILE_TEST
@@ -1976,10 +1984,10 @@ config LANTIQ_WDT
config LOONGSON1_WDT
tristate "Loongson1 SoC hardware watchdog"
- depends on MACH_LOONGSON32 || COMPILE_TEST
+ depends on MACH_LOONGSON32 || MACH_LOONGSON64 || COMPILE_TEST
select WATCHDOG_CORE
help
- Hardware driver for the Loongson1 SoC Watchdog Timer.
+ Hardware driver for the Loongson family Watchdog Timer.
config RALINK_WDT
tristate "Ralink SoC watchdog"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index b680e4d3c1bc..ba52099b1253 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
+obj-$(CONFIG_RENESAS_WWDT) += renesas_wwdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
obj-$(CONFIG_RENESAS_RZN1WDT) += rzn1_wdt.o
obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 837e15701c0e..c9e79851504c 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -35,6 +35,7 @@ struct aspeed_wdt_config {
u32 irq_shift;
u32 irq_mask;
struct aspeed_wdt_scu scu;
+ u32 num_reset_masks;
};
struct aspeed_wdt {
@@ -66,6 +67,7 @@ static const struct aspeed_wdt_config ast2500_config = {
.wdt_reset_mask = 0x1,
.wdt_reset_mask_shift = 2,
},
+ .num_reset_masks = 1,
};
static const struct aspeed_wdt_config ast2600_config = {
@@ -78,12 +80,27 @@ static const struct aspeed_wdt_config ast2600_config = {
.wdt_reset_mask = 0xf,
.wdt_reset_mask_shift = 16,
},
+ .num_reset_masks = 2,
+};
+
+static const struct aspeed_wdt_config ast2700_config = {
+ .ext_pulse_width_mask = 0xfffff,
+ .irq_shift = 0,
+ .irq_mask = GENMASK(31, 10),
+ .scu = {
+ .compatible = "aspeed,ast2700-scu0",
+ .reset_status_reg = 0x70,
+ .wdt_reset_mask = 0xf,
+ .wdt_reset_mask_shift = 0,
+ },
+ .num_reset_masks = 5,
};
static const struct of_device_id aspeed_wdt_of_table[] = {
{ .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
{ .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
{ .compatible = "aspeed,ast2600-wdt", .data = &ast2600_config },
+ { .compatible = "aspeed,ast2700-wdt", .data = &ast2700_config },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
@@ -479,11 +496,11 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
- if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
- (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
- u32 reset_mask[2];
- size_t nrstmask = of_device_is_compatible(np, "aspeed,ast2600-wdt") ? 2 : 1;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-wdt")) {
+ u32 reset_mask[5];
+ size_t nrstmask = wdt->cfg->num_reset_masks;
u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
+ int i;
reg &= wdt->cfg->ext_pulse_width_mask;
if (of_property_read_bool(np, "aspeed,ext-active-high"))
@@ -503,9 +520,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
ret = of_property_read_u32_array(np, "aspeed,reset-mask", reset_mask, nrstmask);
if (!ret) {
- writel(reset_mask[0], wdt->base + WDT_RESET_MASK1);
- if (nrstmask > 1)
- writel(reset_mask[1], wdt->base + WDT_RESET_MASK2);
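+ /* The reset mask registers form a contiguous array starting at WDT_RESET_MASK1 */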
+ for (i = 0; i < nrstmask; i++)
+ writel(reset_mask[i], wdt->base + WDT_RESET_MASK1 + i * 4);
}
}
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 9daed2758ae5..ea2b0171e70b 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -6,10 +6,10 @@
* to CP.
*
* The command can be altered using the module parameter "cmd". This is
- * not recommended because it's only supported on z/VM but not whith LPAR.
+ * not recommended because it's only supported on z/VM but not with LPAR.
*
- * On LPAR, the watchdog will always trigger a system restart. the module
- * paramter cmd is meaningless here.
+ * On LPAR, the watchdog will always trigger a system restart. The module
+ * parameter cmd is meaningless here.
*
*
* Copyright IBM Corp. 2004, 2013
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
index 0587ff44d3a1..2417519717cc 100644
--- a/drivers/watchdog/loongson1_wdt.c
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn>
*/
#include <linux/clk.h>
@@ -10,31 +11,52 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>
-/* Loongson 1 Watchdog Register Definitions */
+/* Loongson Watchdog Register Definitions */
#define WDT_EN 0x0
-#define WDT_TIMER 0x4
-#define WDT_SET 0x8
#define DEFAULT_HEARTBEAT 30
static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0444);
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static unsigned int heartbeat;
-module_param(heartbeat, uint, 0444);
+module_param(heartbeat, uint, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+ __MODULE_STRING(DEFAULT_HEARTBEAT) ")");
+
+struct ls1x_wdt_pdata {
+ u32 timer_offset;
+ u32 set_offset;
+ u32 wdt_en_bit;
+};
+
+static const struct ls1x_wdt_pdata ls1b_wdt_pdata = {
+ .timer_offset = 0x4,
+ .set_offset = 0x8,
+ .wdt_en_bit = BIT(0),
+};
+
+static const struct ls1x_wdt_pdata ls2k0300_wdt_pdata = {
+ .timer_offset = 0x8,
+ .set_offset = 0x4,
+ .wdt_en_bit = BIT(1),
+};
struct ls1x_wdt_drvdata {
void __iomem *base;
struct clk *clk;
unsigned long clk_rate;
struct watchdog_device wdt;
+ const struct ls1x_wdt_pdata *pdata;
};
static int ls1x_wdt_ping(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writel(0x1, drvdata->base + WDT_SET);
+ writel(0x1, drvdata->base + drvdata->pdata->set_offset);
return 0;
}
@@ -49,7 +71,7 @@ static int ls1x_wdt_set_timeout(struct watchdog_device *wdt_dev,
wdt_dev->timeout = timeout;
counts = drvdata->clk_rate * min(timeout, max_hw_heartbeat);
- writel(counts, drvdata->base + WDT_TIMER);
+ writel(counts, drvdata->base + drvdata->pdata->timer_offset);
return 0;
}
@@ -58,7 +80,7 @@ static int ls1x_wdt_start(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writel(0x1, drvdata->base + WDT_EN);
+ writel(drvdata->pdata->wdt_en_bit, drvdata->base + WDT_EN);
return 0;
}
@@ -66,8 +88,10 @@ static int ls1x_wdt_start(struct watchdog_device *wdt_dev)
static int ls1x_wdt_stop(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
+ u32 val = readl(drvdata->base + WDT_EN);
- writel(0x0, drvdata->base + WDT_EN);
+ val &= ~(drvdata->pdata->wdt_en_bit);
+ writel(val, drvdata->base + WDT_EN);
return 0;
}
@@ -77,9 +101,9 @@ static int ls1x_wdt_restart(struct watchdog_device *wdt_dev,
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writel(0x1, drvdata->base + WDT_EN);
- writel(0x1, drvdata->base + WDT_TIMER);
- writel(0x1, drvdata->base + WDT_SET);
+ writel(drvdata->pdata->wdt_en_bit, drvdata->base + WDT_EN);
+ writel(0x1, drvdata->base + drvdata->pdata->timer_offset);
+ writel(0x1, drvdata->base + drvdata->pdata->set_offset);
return 0;
}
@@ -104,11 +128,13 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
struct ls1x_wdt_drvdata *drvdata;
struct watchdog_device *ls1x_wdt;
unsigned long clk_rate;
- int err;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
+ platform_set_drvdata(pdev, drvdata);
+
+ drvdata->pdata = of_device_get_match_data(dev);
drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
@@ -135,36 +161,51 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(ls1x_wdt, nowayout);
watchdog_set_drvdata(ls1x_wdt, drvdata);
- err = devm_watchdog_register_device(dev, &drvdata->wdt);
- if (err)
- return err;
+ return devm_watchdog_register_device(dev, &drvdata->wdt);
+}
- platform_set_drvdata(pdev, drvdata);
+static int ls1x_wdt_resume(struct device *dev)
+{
+ struct ls1x_wdt_drvdata *data = dev_get_drvdata(dev);
+
+ if (watchdog_active(&data->wdt))
+ ls1x_wdt_start(&data->wdt);
+
+ return 0;
+}
+
+static int ls1x_wdt_suspend(struct device *dev)
+{
+ struct ls1x_wdt_drvdata *data = dev_get_drvdata(dev);
- dev_info(dev, "Loongson1 Watchdog driver registered\n");
+ if (watchdog_active(&data->wdt))
+ ls1x_wdt_stop(&data->wdt);
return 0;
}
-#ifdef CONFIG_OF
+static DEFINE_SIMPLE_DEV_PM_OPS(ls1x_wdt_pm_ops, ls1x_wdt_suspend, ls1x_wdt_resume);
+
static const struct of_device_id ls1x_wdt_dt_ids[] = {
- { .compatible = "loongson,ls1b-wdt", },
- { .compatible = "loongson,ls1c-wdt", },
+ { .compatible = "loongson,ls1b-wdt", .data = &ls1b_wdt_pdata },
+ { .compatible = "loongson,ls1c-wdt", .data = &ls1b_wdt_pdata },
+ { .compatible = "loongson,ls2k0300-wdt", .data = &ls2k0300_wdt_pdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_wdt_dt_ids);
-#endif
static struct platform_driver ls1x_wdt_driver = {
.probe = ls1x_wdt_probe,
.driver = {
.name = "ls1x-wdt",
- .of_match_table = of_match_ptr(ls1x_wdt_dt_ids),
+ .of_match_table = ls1x_wdt_dt_ids,
+ .pm = pm_ptr(&ls1x_wdt_pm_ops),
},
};
module_platform_driver(ls1x_wdt_driver);
MODULE_AUTHOR("Yang Ling <gnaygnil@gmail.com>");
-MODULE_DESCRIPTION("Loongson1 Watchdog Driver");
+MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/renesas_wwdt.c b/drivers/watchdog/renesas_wwdt.c
new file mode 100644
index 000000000000..b250913c349a
--- /dev/null
+++ b/drivers/watchdog/renesas_wwdt.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Renesas Window Watchdog Timer (WWDT)
+ *
+ * The WWDT can only be set up once after boot. Because we cannot know whether
+ * this already happened in early boot stages, the firmware is required to
+ * configure the watchdog; Linux then adapts to the given setup. Note that, in
+ * the default configuration, this watchdog reports an overflow to the Error
+ * Control Module, which then decides further actions; alternatively, the WWDT
+ * can be configured to generate an interrupt instead.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define WDTA0WDTE 0x00
+#define WDTA0RUN BIT(7)
+#define WDTA0_KEY 0x2c
+
+#define WDTA0MD 0x0c
+#define WDTA0OVF(x) FIELD_GET(GENMASK(6, 4), x)
+#define WDTA0WIE BIT(3)
+#define WDTA0ERM BIT(2)
+#define WDTA0WS(x) FIELD_GET(GENMASK(1, 0), x)
+
+struct wwdt_priv {
+ void __iomem *base;
+ struct watchdog_device wdev;
+};
+
+static int wwdt_start(struct watchdog_device *wdev)
+{
+ struct wwdt_priv *priv = container_of(wdev, struct wwdt_priv, wdev);
+
+ writeb(WDTA0RUN | WDTA0_KEY, priv->base + WDTA0WDTE);
+ return 0;
+}
+
+static const struct watchdog_info wwdt_ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
+ .identity = "Renesas Window Watchdog",
+};
+
+static const struct watchdog_ops wwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wwdt_start,
+};
+
+static irqreturn_t wwdt_error_irq(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+
+ dev_warn(dev, "Watchdog timed out\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wwdt_pretimeout_irq(int irq, void *dev_id)
+{
+ struct watchdog_device *wdev = dev_id;
+
+ watchdog_notify_pretimeout(wdev);
+ return IRQ_HANDLED;
+}
+
+static int wwdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wwdt_priv *priv;
+ struct watchdog_device *wdev;
+ struct clk *clk;
+ unsigned long rate;
+ unsigned int interval, window_size;
+ int ret;
+ u8 val;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ clk = devm_clk_get(dev, "cnt");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
+
+ wdev = &priv->wdev;
+
+ val = readb(priv->base + WDTA0WDTE);
+ if (val & WDTA0RUN)
+ set_bit(WDOG_HW_RUNNING, &wdev->status);
+
+ val = readb(priv->base + WDTA0MD);
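+ /* Overflow interval in counter clock cycles: 2^(9 + WDTA0OVF) */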
+ interval = 1 << (9 + WDTA0OVF(val));
+ /* size of the closed(!) window per mille */
+ window_size = 250 * (3 - WDTA0WS(val));
+
+ wdev->info = &wwdt_ident;
+ wdev->ops = &wwdt_ops;
+ wdev->parent = dev;
+ wdev->min_hw_heartbeat_ms = window_size * interval / rate;
+ wdev->max_hw_heartbeat_ms = 1000 * interval / rate;
+ wdev->timeout = DIV_ROUND_UP(wdev->max_hw_heartbeat_ms, 1000);
+ watchdog_set_nowayout(wdev, true);
+
+ if (!(val & WDTA0ERM)) {
+ ret = platform_get_irq_byname(pdev, "error");
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, ret, NULL, wwdt_error_irq,
+ IRQF_ONESHOT, NULL, dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (val & WDTA0WIE) {
+ ret = platform_get_irq_byname(pdev, "pretimeout");
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, ret, NULL, wwdt_pretimeout_irq,
+ IRQF_ONESHOT, NULL, wdev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return devm_watchdog_register_device(dev, wdev);
+}
+
+static const struct of_device_id renesas_wwdt_ids[] = {
+ { .compatible = "renesas,rcar-gen3-wwdt", },
+ { .compatible = "renesas,rcar-gen4-wwdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_wwdt_ids);
+
+static struct platform_driver renesas_wwdt_driver = {
+ .driver = {
+ .name = "renesas_wwdt",
+ .of_match_table = renesas_wwdt_ids,
+ },
+ .probe = wwdt_probe,
+};
+module_platform_driver(renesas_wwdt_driver);
+
+MODULE_DESCRIPTION("Renesas Window Watchdog (WWDT) Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wolfram Sang <wsa+renesas@sang-engineering.com>");
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index 355918d62f63..ed71d3960a0f 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -500,12 +500,14 @@ static int starfive_wdt_probe(struct platform_device *pdev)
if (pm_runtime_enabled(&pdev->dev)) {
ret = pm_runtime_put_sync(&pdev->dev);
if (ret)
- goto err_exit;
+ goto err_unregister_wdt;
}
}
return 0;
+err_unregister_wdt:
+ watchdog_unregister_device(&wdt->wdd);
err_exit:
starfive_wdt_disable_clock(wdt);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index d647923d68fe..f55576392651 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -165,6 +165,7 @@ static int wdt_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "cannot enable PCI device\n");
return -ENODEV;
}
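+	/* Name the resource so it is identifiable in /proc/iomem. */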
+ wdt_res.name = "via_wdt";
/*
* Allocate a MMIO region which contains watchdog control register
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 650fdc7996e1..dd3c2d69c9df 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -326,19 +326,27 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return -ENODEV;
wdat = devm_kzalloc(dev, sizeof(*wdat), GFP_KERNEL);
- if (!wdat)
- return -ENOMEM;
+ if (!wdat) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
regs = devm_kcalloc(dev, pdev->num_resources, sizeof(*regs),
GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
/* WDAT specification wants to have >= 1ms period */
- if (tbl->timer_period < 1)
- return -EINVAL;
- if (tbl->min_count > tbl->max_count)
- return -EINVAL;
+ if (tbl->timer_period < 1) {
+ ret = -EINVAL;
+ goto out_put_table;
+ }
+ if (tbl->min_count > tbl->max_count) {
+ ret = -EINVAL;
+ goto out_put_table;
+ }
wdat->period = tbl->timer_period;
wdat->wdd.min_timeout = DIV_ROUND_UP(wdat->period * tbl->min_count, 1000);
@@ -355,15 +363,20 @@ static int wdat_wdt_probe(struct platform_device *pdev)
res = &pdev->resource[i];
if (resource_type(res) == IORESOURCE_MEM) {
reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg))
- return PTR_ERR(reg);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ goto out_put_table;
+ }
} else if (resource_type(res) == IORESOURCE_IO) {
reg = devm_ioport_map(dev, res->start, 1);
- if (!reg)
- return -ENOMEM;
+ if (!reg) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
} else {
dev_err(dev, "Unsupported resource\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_table;
}
regs[i] = reg;
@@ -385,8 +398,10 @@ static int wdat_wdt_probe(struct platform_device *pdev)
}
instr = devm_kzalloc(dev, sizeof(*instr), GFP_KERNEL);
- if (!instr)
- return -ENOMEM;
+ if (!instr) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
INIT_LIST_HEAD(&instr->node);
instr->entry = entries[i];
@@ -417,7 +432,8 @@ static int wdat_wdt_probe(struct platform_device *pdev)
if (!instr->reg) {
dev_err(dev, "I/O resource not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_table;
}
instructions = wdat->instructions[action];
@@ -425,8 +441,10 @@ static int wdat_wdt_probe(struct platform_device *pdev)
instructions = devm_kzalloc(dev,
sizeof(*instructions),
GFP_KERNEL);
- if (!instructions)
- return -ENOMEM;
+ if (!instructions) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
INIT_LIST_HEAD(instructions);
wdat->instructions[action] = instructions;
@@ -443,7 +461,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
ret = wdat_wdt_enable_reboot(wdat);
if (ret)
- return ret;
+ goto out_put_table;
platform_set_drvdata(pdev, wdat);
@@ -460,12 +478,16 @@ static int wdat_wdt_probe(struct platform_device *pdev)
ret = wdat_wdt_set_timeout(&wdat->wdd, timeout);
if (ret)
- return ret;
+ goto out_put_table;
watchdog_set_nowayout(&wdat->wdd, nowayout);
watchdog_stop_on_reboot(&wdat->wdd);
watchdog_stop_on_unregister(&wdat->wdd);
- return devm_watchdog_register_device(dev, &wdat->wdd);
+ ret = devm_watchdog_register_device(dev, &wdat->wdd);
+
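+	/* Drop the reference taken on the WDAT table by acpi_get_table(). */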
+out_put_table:
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return ret;
}
static int wdat_wdt_suspend_noirq(struct device *dev)
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 29257d2639db..14077d23f2a1 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -163,18 +163,22 @@ static void xen_grant_dma_free_pages(struct device *dev, size_t size,
xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}
-static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
+static dma_addr_t xen_grant_dma_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
+ unsigned long offset = offset_in_page(phys);
unsigned long dma_offset = xen_offset_in_page(offset),
pfn_offset = XEN_PFN_DOWN(offset);
unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
grant_ref_t grant;
dma_addr_t dma_handle;
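+	/* Grant mappings require struct pages; MMIO ranges cannot be granted. */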
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
if (WARN_ON(dir == DMA_NONE))
return DMA_MAPPING_ERROR;
@@ -190,7 +194,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
for (i = 0; i < n_pages; i++) {
gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
- pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+ pfn_to_gfn(page_to_xen_pfn(phys_to_page(phys)) + i + pfn_offset),
dir == DMA_TO_DEVICE);
}
@@ -199,7 +203,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
return dma_handle;
}
-static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+static void xen_grant_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
@@ -242,7 +246,7 @@ static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
return;
for_each_sg(sg, s, nents, i)
- xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
+ xen_grant_dma_unmap_phys(dev, s->dma_address, sg_dma_len(s), dir,
attrs);
}
@@ -257,7 +261,7 @@ static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
return -EINVAL;
for_each_sg(sg, s, nents, i) {
- s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
+ s->dma_address = xen_grant_dma_map_phys(dev, sg_phys(s),
s->length, dir, attrs);
if (s->dma_address == DMA_MAPPING_ERROR)
goto out;
@@ -286,8 +290,8 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.free_pages = xen_grant_dma_free_pages,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .map_page = xen_grant_dma_map_page,
- .unmap_page = xen_grant_dma_unmap_page,
+ .map_phys = xen_grant_dma_map_phys,
+ .unmap_phys = xen_grant_dma_unmap_phys,
.map_sg = xen_grant_dma_map_sg,
.unmap_sg = xen_grant_dma_unmap_sg,
.dma_supported = xen_grant_dma_supported,
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 478d2ad725ac..3e76e33f6e08 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1204,7 +1204,7 @@ void gnttab_foreach_grant_in_range(struct page *page,
unsigned int glen;
unsigned long xen_pfn;
- len = min_t(unsigned int, PAGE_SIZE - offset, len);
+ len = min(PAGE_SIZE - offset, len);
goffset = xen_offset_in_page(offset);
xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index dd7747a2de87..ccf25027bec1 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_sync_single_for_cpu is performed.
*/
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+ dma_addr_t dev_addr;
+ phys_addr_t map;
BUG_ON(dir == DMA_NONE);
+
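+	/*
+	 * MMIO addresses (e.g. peer-to-peer BARs) cannot be bounced through
+	 * swiotlb; just check that the device can reach them directly.
+	 */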
+ if (attrs & DMA_ATTR_MMIO) {
+ if (unlikely(!dma_capable(dev, phys, size, false))) {
+ dev_err_once(
+ dev,
+ "DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
+ &phys, size, *dev->dma_mask,
+ dev->bus_dma_limit);
+ WARN_ON_ONCE(1);
+ return DMA_MAPPING_ERROR;
+ }
+ return phys;
+ }
+
+ dev_addr = xen_phys_to_dma(dev, phys);
+
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
@@ -257,13 +272,13 @@ done:
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * match what was provided in a previous xen_swiotlb_map_phys call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for xen_swiotlb_unmap_phys() above.
*/
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+ xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
dir, attrs);
}
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
- sg->offset, sg->length, dir, attrs);
+ sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+ sg->length, dir, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(sg) = sg->length;
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
}
}
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr = paddr;
-
- if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
- dev_err_once(dev,
- "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
- WARN_ON_ONCE(1);
- return DMA_MAPPING_ERROR;
- }
-
- return dma_addr;
-}
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
.map_sg = xen_swiotlb_map_sg,
.unmap_sg = xen_swiotlb_unmap_sg,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
+ .map_phys = xen_swiotlb_map_phys,
+ .unmap_phys = xen_swiotlb_unmap_phys,
.dma_supported = xen_swiotlb_dma_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
- .map_resource = xen_swiotlb_direct_map_resource,
};
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 296703939846..f2e8eaf684ba 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -495,7 +495,7 @@ static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
pr_info("ACPI data upload failed, error = %d\n", rc);
}
-static void xen_acpi_processor_resume(void)
+static void xen_acpi_processor_resume(void *data)
{
static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
@@ -509,10 +509,14 @@ static void xen_acpi_processor_resume(void)
schedule_work(&wq);
}
-static struct syscore_ops xap_syscore_ops = {
+static const struct syscore_ops xap_syscore_ops = {
.resume = xen_acpi_processor_resume,
};
+static struct syscore xap_syscore = {
+ .ops = &xap_syscore_ops,
+};
+
static int __init xen_acpi_processor_init(void)
{
int i;
@@ -563,7 +567,7 @@ static int __init xen_acpi_processor_init(void)
if (rc)
goto err_unregister;
- register_syscore_ops(&xap_syscore_ops);
+ register_syscore(&xap_syscore);
return 0;
err_unregister:
@@ -580,7 +584,7 @@ static void __exit xen_acpi_processor_exit(void)
{
int i;
- unregister_syscore_ops(&xap_syscore_ops);
+ unregister_syscore(&xap_syscore);
bitmap_free(acpi_ids_done);
bitmap_free(acpi_id_present);
bitmap_free(acpi_id_cst_present);
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index f794014814be..15f18374020e 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -407,7 +407,7 @@ static char *join(const char *dir, const char *name)
buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
else
buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
- return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
+ return buffer ?: ERR_PTR(-ENOMEM);
}
static char **split_strings(char *strings, unsigned int len, unsigned int *num)
@@ -546,18 +546,12 @@ int xenbus_transaction_start(struct xenbus_transaction *t)
EXPORT_SYMBOL_GPL(xenbus_transaction_start);
/* End a transaction.
- * If abandon is true, transaction is discarded instead of committed.
+ * If abort is true, transaction is discarded instead of committed.
*/
-int xenbus_transaction_end(struct xenbus_transaction t, int abort)
+int xenbus_transaction_end(struct xenbus_transaction t, bool abort)
{
- char abortstr[2];
-
- if (abort)
- strcpy(abortstr, "F");
- else
- strcpy(abortstr, "T");
-
- return xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
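+	/* Xenstore commits the transaction on "T" and discards it on "F". */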
+ return xs_error(xs_single(t, XS_TRANSACTION_END, abort ? "F" : "T",
+ NULL));
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);