author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-27 10:04:49 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-27 10:04:49 -0800
commit    11c70529983e8136ea1bd5c32e4f9cd14503c644 (patch)
tree      8e36f8397a68a4906fd49b72de8c7efb9b0e2a0d
parent    d40b2f4c94f221bd5aab205f945e6f88d3df0929 (diff)
parent    524af30c931382726b6a46ee4f392fb6e60f8a03 (diff)
Merge tag 'soc-drivers-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "As usual, there are lots of minor driver changes across SoC platforms
  from NXP, Amlogic, AMD Zynq, Mediatek, Qualcomm, Apple and Samsung.
  These usually add support for additional chip variations in existing
  drivers, but also add features or bugfixes.

  The SCMI firmware subsystem gains a unified raw userspace interface
  through debugfs, which can be used for validation purposes.

  Newly added drivers include:

   - New power management drivers for StarFive JH7110, Allwinner D1 and
     Renesas RZ/V2M

   - A driver for Qualcomm battery and power supply status

   - A SoC device driver for identifying Nuvoton WPCM450 chips

   - A regulator coupler driver for Mediatek MT81xxv"

* tag 'soc-drivers-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (165 commits)
  power: supply: Introduce Qualcomm PMIC GLINK power supply
  soc: apple: rtkit: Do not copy the reg state structure to the stack
  soc: sunxi: SUN20I_PPU should depend on PM
  memory: renesas-rpc-if: Remove redundant division of dummy
  soc: qcom: socinfo: Add IDs for IPQ5332 and its variant
  dt-bindings: arm: qcom,ids: Add IDs for IPQ5332 and its variant
  dt-bindings: power: qcom,rpmpd: add RPMH_REGULATOR_LEVEL_LOW_SVS_L1
  firmware: qcom_scm: Move qcom_scm.h to include/linux/firmware/qcom/
  MAINTAINERS: Update qcom CPR maintainer entry
  dt-bindings: firmware: document Qualcomm SM8550 SCM
  dt-bindings: firmware: qcom,scm: add qcom,scm-sa8775p compatible
  soc: qcom: socinfo: Add Soc IDs for IPQ8064 and variants
  dt-bindings: arm: qcom,ids: Add Soc IDs for IPQ8064 and variants
  soc: qcom: socinfo: Add support for new field in revision 17
  soc: qcom: smd-rpm: Add IPQ9574 compatible
  soc: qcom: pmic_glink: remove redundant calculation of svid
  soc: qcom: stats: Populate all subsystem debugfs files
  dt-bindings: soc: qcom,rpmh-rsc: Update to allow for generic nodes
  soc: qcom: pmic_glink: add CONFIG_NET/CONFIG_OF dependencies
  soc: qcom: pmic_glink: Introduce altmode support
  ...
-rw-r--r-- Documentation/ABI/testing/debugfs-driver-dcc | 127
-rw-r--r-- Documentation/ABI/testing/debugfs-scmi | 70
-rw-r--r-- Documentation/ABI/testing/debugfs-scmi-raw | 117
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/firmware/amlogic,meson-gxbb-sm.yaml | 39
-rw-r--r-- Documentation/devicetree/bindings/firmware/meson/meson_sm.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/firmware/qcom,scm.yaml | 68
-rw-r--r-- Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-mt6323.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml | 111
-rw-r--r-- Documentation/devicetree/bindings/mfd/mt6397.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mfd/qcom-rpm.txt | 283
-rw-r--r-- Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml | 11
-rw-r--r-- Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml | 54
-rw-r--r-- Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/power/mediatek,power-controller.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/power/qcom,rpmpd.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/power/starfive,jh7110-pmu.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml | 9
-rw-r--r-- Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml | 147
-rw-r--r-- Documentation/devicetree/bindings/soc/mediatek/pwrap.txt | 75
-rw-r--r-- Documentation/devicetree/bindings/soc/qcom/qcom,dcc.yaml | 44
-rw-r--r-- Documentation/devicetree/bindings/soc/qcom/qcom,msm8976-ramp-controller.yaml | 36
-rw-r--r-- Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml | 95
-rw-r--r-- Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml | 101
-rw-r--r-- Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml | 1
-rw-r--r-- MAINTAINERS | 16
-rw-r--r-- arch/arm/mach-qcom/platsmp.c | 2
-rw-r--r-- drivers/bus/imx-weim.c | 4
-rw-r--r-- drivers/cpuidle/cpuidle-qcom-spm.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/Kconfig | 32
-rw-r--r-- drivers/firmware/arm_scmi/Makefile | 9
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 395
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 100
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 1223
-rw-r--r-- drivers/firmware/arm_scmi/mailbox.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/optee.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/protocols.h | 7
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.c | 1443
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.h | 31
-rw-r--r-- drivers/firmware/arm_scmi/smc.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/virtio.c | 4
-rw-r--r-- drivers/firmware/meson/meson_sm.c | 2
-rw-r--r-- drivers/firmware/qcom_scm-legacy.c | 2
-rw-r--r-- drivers/firmware/qcom_scm-smc.c | 88
-rw-r--r-- drivers/firmware/qcom_scm.c | 92
-rw-r--r-- drivers/firmware/qcom_scm.h | 8
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 27
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 2
-rw-r--r-- drivers/input/keyboard/mtk-pmic-keys.c | 17
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c | 2
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 2
-rw-r--r-- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 2
-rw-r--r-- drivers/media/platform/qcom/venus/firmware.c | 2
-rw-r--r-- drivers/memory/renesas-rpc-if.c | 155
-rw-r--r-- drivers/memory/ti-emif-pm.c | 7
-rw-r--r-- drivers/misc/fastrpc.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-msm.c | 2
-rw-r--r-- drivers/mtd/hyperbus/rpc-if.c | 18
-rw-r--r-- drivers/net/ipa/ipa_main.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/qmi.c | 2
-rw-r--r-- drivers/phy/samsung/phy-exynos-dp-video.c | 7
-rw-r--r-- drivers/phy/samsung/phy-exynos-mipi-video.c | 7
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.c | 2
-rw-r--r-- drivers/power/supply/Kconfig | 9
-rw-r--r-- drivers/power/supply/Makefile | 1
-rw-r--r-- drivers/power/supply/qcom_battmgr.c | 1411
-rw-r--r-- drivers/remoteproc/qcom_q6v5_mss.c | 2
-rw-r--r-- drivers/remoteproc/qcom_q6v5_pas.c | 2
-rw-r--r-- drivers/remoteproc/qcom_wcnss.c | 2
-rw-r--r-- drivers/soc/Kconfig | 2
-rw-r--r-- drivers/soc/Makefile | 4
-rw-r--r-- drivers/soc/amlogic/meson-ee-pwrc.c | 17
-rw-r--r-- drivers/soc/apple/apple-pmgr-pwrstate.c | 11
-rw-r--r-- drivers/soc/apple/rtkit-crashlog.c | 93
-rw-r--r-- drivers/soc/apple/rtkit.c | 36
-rw-r--r-- drivers/soc/bcm/bcm2835-power.c | 6
-rw-r--r-- drivers/soc/imx/Kconfig | 11
-rw-r--r-- drivers/soc/imx/Makefile | 6
-rw-r--r-- drivers/soc/imx/imx8m-blk-ctrl.c | 27
-rw-r--r-- drivers/soc/imx/imx8mp-blk-ctrl.c | 108
-rw-r--r-- drivers/soc/imx/imx93-pd.c | 1
-rw-r--r-- drivers/soc/imx/imx93-src.c | 1
-rw-r--r-- drivers/soc/mediatek/Kconfig | 7
-rw-r--r-- drivers/soc/mediatek/Makefile | 1
-rw-r--r-- drivers/soc/mediatek/mt8186-pm-domains.h | 4
-rw-r--r-- drivers/soc/mediatek/mt8188-mmsys.h | 149
-rw-r--r-- drivers/soc/mediatek/mt8188-pm-domains.h | 623
-rw-r--r-- drivers/soc/mediatek/mt8195-mmsys.h | 146
-rw-r--r-- drivers/soc/mediatek/mtk-devapc.c | 11
-rw-r--r-- drivers/soc/mediatek/mtk-mmsys.c | 200
-rw-r--r-- drivers/soc/mediatek/mtk-mmsys.h | 2
-rw-r--r-- drivers/soc/mediatek/mtk-mutex.c | 113
-rw-r--r-- drivers/soc/mediatek/mtk-pm-domains.c | 13
-rw-r--r-- drivers/soc/mediatek/mtk-pm-domains.h | 5
-rw-r--r-- drivers/soc/mediatek/mtk-regulator-coupler.c | 159
-rw-r--r-- drivers/soc/mediatek/mtk-svs.c | 155
-rw-r--r-- drivers/soc/nuvoton/Kconfig | 11
-rw-r--r-- drivers/soc/nuvoton/Makefile | 2
-rw-r--r-- drivers/soc/nuvoton/wpcm450-soc.c | 109
-rw-r--r-- drivers/soc/qcom/Kconfig | 27
-rw-r--r-- drivers/soc/qcom/Makefile | 3
-rw-r--r-- drivers/soc/qcom/mdt_loader.c | 2
-rw-r--r-- drivers/soc/qcom/ocmem.c | 2
-rw-r--r-- drivers/soc/qcom/pmic_glink.c | 336
-rw-r--r-- drivers/soc/qcom/pmic_glink_altmode.c | 478
-rw-r--r-- drivers/soc/qcom/qcom_stats.c | 10
-rw-r--r-- drivers/soc/qcom/ramp_controller.c | 343
-rw-r--r-- drivers/soc/qcom/rmtfs_mem.c | 31
-rw-r--r-- drivers/soc/qcom/rpmhpd.c | 34
-rw-r--r-- drivers/soc/qcom/rpmpd.c | 18
-rw-r--r-- drivers/soc/qcom/smd-rpm.c | 1
-rw-r--r-- drivers/soc/qcom/socinfo.c | 112
-rw-r--r-- drivers/soc/renesas/Kconfig | 4
-rw-r--r-- drivers/soc/renesas/Makefile | 1
-rw-r--r-- drivers/soc/renesas/pwc-rzv2m.c | 141
-rw-r--r-- drivers/soc/renesas/r8a779g0-sysc.c | 1
-rw-r--r-- drivers/soc/sifive/Kconfig | 2
-rw-r--r-- drivers/soc/starfive/Kconfig | 12
-rw-r--r-- drivers/soc/starfive/Makefile | 3
-rw-r--r-- drivers/soc/starfive/jh71xx_pmu.c | 383
-rw-r--r-- drivers/soc/sunxi/Kconfig | 9
-rw-r--r-- drivers/soc/sunxi/Makefile | 1
-rw-r--r-- drivers/soc/sunxi/sun20i-ppu.c | 207
-rw-r--r-- drivers/soc/sunxi/sunxi_sram.c | 3
-rw-r--r-- drivers/soc/xilinx/xlnx_event_manager.c | 4
-rw-r--r-- drivers/soc/xilinx/zynqmp_pm_domains.c | 2
-rw-r--r-- drivers/spi/spi-rpc-if.c | 14
-rw-r--r-- drivers/thermal/qcom/lmh.c | 2
-rw-r--r-- drivers/ufs/host/ufs-qcom-ice.c | 2
-rw-r--r-- fs/debugfs/file.c | 1
-rw-r--r-- include/dt-bindings/arm/qcom,ids.h | 83
-rw-r--r-- include/dt-bindings/firmware/qcom,scm.h | 16
-rw-r--r-- include/dt-bindings/power/allwinner,sun20i-d1-ppu.h | 10
-rw-r--r-- include/dt-bindings/power/mediatek,mt8188-power.h | 44
-rw-r--r-- include/dt-bindings/power/qcom-rpmpd.h | 30
-rw-r--r-- include/dt-bindings/power/r8a779g0-sysc.h | 1
-rw-r--r-- include/dt-bindings/power/starfive,jh7110-pmu.h | 17
-rw-r--r-- include/dt-bindings/reset/mt8195-resets.h | 45
-rw-r--r-- include/linux/firmware/qcom/qcom_scm.h (renamed from include/linux/qcom_scm.h) | 6
-rw-r--r-- include/linux/firmware/xlnx-zynqmp.h | 4
-rw-r--r-- include/linux/scmi_protocol.h | 5
-rw-r--r-- include/linux/soc/apple/rtkit.h | 26
-rw-r--r-- include/linux/soc/mediatek/infracfg.h | 121
-rw-r--r-- include/linux/soc/mediatek/mtk-mmsys.h | 25
-rw-r--r-- include/linux/soc/qcom/apr.h | 2
-rw-r--r-- include/linux/soc/qcom/pmic_glink.h | 32
-rw-r--r-- include/memory/renesas-rpc-if.h | 34
-rw-r--r-- include/trace/events/scmi.h | 18
-rw-r--r-- sound/soc/qcom/qdsp6/q6core.c | 4
156 files changed, 10055 insertions, 1337 deletions
diff --git a/Documentation/ABI/testing/debugfs-driver-dcc b/Documentation/ABI/testing/debugfs-driver-dcc
new file mode 100644
index 000000000000..27ed5919d21b
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-dcc
@@ -0,0 +1,127 @@
+What: /sys/kernel/debug/dcc/.../ready
+Date: December 2022
+Contact: Souradeep Chowdhury <quic_schowdhu@quicinc.com>
+Description:
+ This file is used to check whether the dcc
+ hardware is ready to receive user configurations.
+ A 'Y' here indicates dcc is ready.
+
+What: /sys/kernel/debug/dcc/.../trigger
+Date: December 2022
+Contact: Souradeep Chowdhury <quic_schowdhu@quicinc.com>
+Description:
+ This is the debugfs interface for manual software
+ triggers. The trigger can be invoked by writing '1'
+ to the file.
+
+What: /sys/kernel/debug/dcc/.../config_reset
+Date: December 2022
+Contact: Souradeep Chowdhury <quic_schowdhu@quicinc.com>
+Description:
+ This file is used to reset the configuration of
+ the dcc driver to the default configuration. When '1'
+ is written to the file, all the previously stored
+ addresses are removed from the driver and users need
+ to configure the addresses again.
+
+What: /sys/kernel/debug/dcc/.../[list-number]/config
+Date: December 2022
+Contact: Souradeep Chowdhury <quic_schowdhu@quicinc.com>
+Description:
+ This stores the addresses of the registers which
+ can be read in case of a hardware crash or manual
+ software triggers. The input address type
+ can be one of the following dcc instructions: read,
+ write, read-write, or loop. The lists need to
+ be configured sequentially, not in an overlapping
+ manner; e.g. users can jump to list x only after
+ list y is configured and enabled. The input format for
+ each type is as follows:
+
+ i) Read instruction
+
+ ::
+
+ echo R <addr> <n> <bus> >/sys/kernel/debug/dcc/../[list-number]/config
+
+ where:
+
+ <addr>
+ The address to be read.
+
+ <n>
+ The number of words to read, starting from <addr>.
+ Each word is 32 bits (4 bytes). If omitted, it defaults
+ to 1.
+
+ <bus type>
+ The bus type, which can be either 'apb' or 'ahb'.
+ The default is 'ahb' if left out.
+
+ ii) Write instruction
+
+ ::
+
+ echo W <addr> <n> <bus type> > /sys/kernel/debug/dcc/../[list-number]/config
+
+ where:
+
+ <addr>
+ The address to be written.
+
+ <n>
+ The value to be written at <addr>.
+
+ <bus type>
+ The bus type, which can be either 'apb' or 'ahb'.
+
+ iii) Read-write instruction
+
+ ::
+
+ echo RW <addr> <n> <mask> > /sys/kernel/debug/dcc/../[list-number]/config
+
+ where:
+
+ <addr>
+ The address to be read and written.
+
+ <n>
+ The value to be written at <addr>.
+
+ <mask>
+ The value mask.
+
+ iv) Loop instruction
+
+ ::
+
+ echo L <loop count> <address count> <address>... > /sys/kernel/debug/dcc/../[list-number]/config
+
+ where:
+
+ <loop count>
+ Number of iterations
+
+ <address count>
+ Total number of addresses to be written.
+
+ <address>
+ Space-separated list of addresses.
+
+What: /sys/kernel/debug/dcc/.../[list-number]/enable
+Date: December 2022
+Contact: Souradeep Chowdhury <quic_schowdhu@quicinc.com>
+Description:
+ This debugfs interface is used for enabling
+ the dcc hardware. A file named "enable" is present in
+ each list-number directory, where users can enable or
+ disable the specific list by writing a boolean (1 or 0)
+ to the file.
+
+ On enabling the dcc, all the addresses specified
+ by the user for the corresponding list are written
+ into dcc sram, which is read by the dcc hardware
+ on manual or crash-induced triggers. Lists must
+ be configured and enabled sequentially; e.g. list
+ 2 can only be enabled after list 1 has been.
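Taken together, the documented flow is: check readiness, write addresses into a list's config file, enable the list, then trigger. A minimal shell sketch of that sequence follows; the instance directory name, list number and register address are placeholders for illustration, not values taken from this patch:

    cat /sys/kernel/debug/dcc/<instance>/ready        # expect 'Y'
    cd /sys/kernel/debug/dcc/<instance>/1
    echo R 0x1540000 4 apb > config                   # capture 4 words from a hypothetical APB register
    echo 1 > enable                                   # commit the list to dcc sram
    echo 1 > ../trigger                               # fire a manual software trigger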
diff --git a/Documentation/ABI/testing/debugfs-scmi b/Documentation/ABI/testing/debugfs-scmi
new file mode 100644
index 000000000000..ee7179ab2edf
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-scmi
@@ -0,0 +1,70 @@
+What: /sys/kernel/debug/scmi/<n>/instance_name
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: The name of the underlying SCMI instance <n> described by
+ all the debugfs accessors rooted at /sys/kernel/debug/scmi/<n>,
+ expressed as the full name of the top DT SCMI node under which
+ this SCMI instance is rooted.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/atomic_threshold_us
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: An optional time value, expressed in microseconds, representing
+ the threshold on this SCMI instance <n> above which any SCMI
+ command advertised to have a higher-than-threshold execution
+ latency should not be considered for atomic mode of operation,
+ even if requested.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/transport/type
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: A string representing the type of transport configured for this
+ SCMI instance <n>.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/transport/is_atomic
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: A boolean stating if the transport configured on the underlying
+ SCMI instance <n> is capable of atomic mode of operation.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/transport/max_rx_timeout_ms
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: Timeout in milliseconds allowed for SCMI synchronous replies
+ for the currently configured SCMI transport for instance <n>.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/transport/max_msg_size
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: Max message size of allowed SCMI messages for the currently
+ configured SCMI transport for instance <n>.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/transport/tx_max_msg
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: Max number of concurrently allowed in-flight SCMI messages for
+ the currently configured SCMI transport for instance <n> on the
+ TX channels.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/transport/rx_max_msg
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: Max number of concurrently allowed in-flight SCMI messages for
+ the currently configured SCMI transport for instance <n> on the
+ RX channels.
+Users: Debugging, any userspace test suite
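Since all of these are read-only introspection entries, the simplest way to inspect a platform is to cat them. A sketch assuming instance 0 and a mailbox transport; the paths follow the entries documented above, but the example values are illustrative, not taken from this merge:

    cat /sys/kernel/debug/scmi/0/instance_name          # e.g. /firmware/scmi
    cat /sys/kernel/debug/scmi/0/transport/type         # e.g. mailbox
    cat /sys/kernel/debug/scmi/0/transport/is_atomic    # e.g. N
    cat /sys/kernel/debug/scmi/0/transport/max_msg_size # e.g. 128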
diff --git a/Documentation/ABI/testing/debugfs-scmi-raw b/Documentation/ABI/testing/debugfs-scmi-raw
new file mode 100644
index 000000000000..97678cc9535c
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-scmi-raw
@@ -0,0 +1,117 @@
+What: /sys/kernel/debug/scmi/<n>/raw/message
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw synchronous message injection/snooping facility; write
+ a complete SCMI synchronous command message (header included)
+ in little-endian binary format to have it sent to the configured
+ backend SCMI server for instance <n>.
+ Any subsequently received response can be read from this same
+ entry if it arrived within the configured timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at a time
+ (receiving an EOF at each message boundary).
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/message_async
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw asynchronous message injection/snooping facility; write
+ a complete SCMI asynchronous command message (header included)
+ in little-endian binary format to have it sent to the configured
+ backend SCMI server for instance <n>.
+ Any subsequently received response can be read from this same
+ entry if it arrived within the configured timeout.
+ Any additional delayed response received afterwards can be read
+ from this same entry too if it arrived within the configured
+ timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at a time
+ (receiving an EOF at each message boundary).
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/errors
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw message errors facility; any kind of timed-out or
+ generally unexpectedly received SCMI message, for instance <n>,
+ can be read from this entry.
+ Each read gives back one message at a time (receiving an EOF at
+ each message boundary).
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/notification
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw notification snooping facility; any notification
+ emitted by the backend SCMI server, for instance <n>, can be
+ read from this entry.
+ Each read gives back one message at a time (receiving an EOF at
+ each message boundary).
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/reset
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw stack reset facility; writing a value to this entry
+ causes the internal queues of any kind of received message,
+ still pending to be read out for instance <n>, to be immediately
+ flushed.
+ Can be used to reset and clean the SCMI Raw stack between two
+ different test runs.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/channels/<m>/message
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw synchronous message injection/snooping facility; write
+ a complete SCMI synchronous command message (header included)
+ in little-endian binary format to have it sent to the configured
+ backend SCMI server for instance <n> through the <m> transport
+ channel.
+ Any subsequently received response can be read from this same
+ entry if it arrived on channel <m> within the configured
+ timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at a time
+ (receiving an EOF at each message boundary).
+ Channel identifier <m> matches the SCMI protocol number which
+ has been associated with this transport channel in the DT
+ description, with base protocol number 0x10 being the default
+ channel for this instance.
+ Note that these per-channel entries rooted at <..>/channels
+ exist only if the transport is configured to have more than
+ one default channel.
+Users: Debugging, any userspace test suite
+
+What: /sys/kernel/debug/scmi/<n>/raw/channels/<m>/message_async
+Date: March 2023
+KernelVersion: 6.3
+Contact: cristian.marussi@arm.com
+Description: SCMI Raw asynchronous message injection/snooping facility; write
+ a complete SCMI asynchronous command message (header included)
+ in little-endian binary format to have it sent to the configured
+ backend SCMI server for instance <n> through the <m> transport
+ channel.
+ Any subsequently received response can be read from this same
+ entry if it arrived on channel <m> within the configured
+ timeout.
+ Any additional delayed response received afterwards can be read
+ from this same entry too if it arrived within the configured
+ timeout.
+ Each write to the entry causes one command request to be built
+ and sent while the replies are read back one message at a time
+ (receiving an EOF at each message boundary).
+ Channel identifier <m> matches the SCMI protocol number which
+ has been associated with this transport channel in the DT
+ description, with base protocol number 0x10 being the default
+ channel for this instance.
+ Note that these per-channel entries rooted at <..>/channels
+ exist only if the transport is configured to have more than
+ one default channel.
+Users: Debugging, any userspace test suite
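Because the raw entries take a complete binary SCMI message, a command can be injected straight from the shell. A rough sketch that sends PROTOCOL_VERSION (message ID 0x0) of the base protocol (0x10) on instance 0: per the SCMI specification the 32-bit header carries the protocol ID in bits [17:10] and the message ID in bits [7:0], so the header here is 0x4000, written in little-endian order; the instance index is a placeholder:

    # header = (0x10 << 10) | 0x0 = 0x4000, little-endian on the wire
    printf '\x00\x40\x00\x00' > /sys/kernel/debug/scmi/0/raw/message
    # read the reply back from the same entry, one message per read
    od -A x -t x1 /sys/kernel/debug/scmi/0/raw/message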
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml
index 0711f1834fbd..d1410345ef18 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml
@@ -31,7 +31,11 @@ properties:
- mediatek,mt8173-mmsys
- mediatek,mt8183-mmsys
- mediatek,mt8186-mmsys
+ - mediatek,mt8188-vdosys0
- mediatek,mt8192-mmsys
+ - mediatek,mt8195-vdosys1
+ - mediatek,mt8195-vppsys0
+ - mediatek,mt8195-vppsys1
- mediatek,mt8365-mmsys
- const: syscon
diff --git a/Documentation/devicetree/bindings/firmware/amlogic,meson-gxbb-sm.yaml b/Documentation/devicetree/bindings/firmware/amlogic,meson-gxbb-sm.yaml
new file mode 100644
index 000000000000..8f50e698760e
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/amlogic,meson-gxbb-sm.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/amlogic,meson-gxbb-sm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic Secure Monitor (SM)
+
+description:
+ In the Amlogic SoCs the Secure Monitor code is used to provide access to the
+ NVMEM, enable JTAG, set USB boot, etc...
+
+maintainers:
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+properties:
+ compatible:
+ oneOf:
+ - const: amlogic,meson-gxbb-sm
+ - items:
+ - const: amlogic,meson-gx-sm
+ - const: amlogic,meson-gxbb-sm
+
+ power-controller:
+ type: object
+ $ref: /schemas/power/amlogic,meson-sec-pwrc.yaml#
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ firmware {
+ secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/firmware/meson/meson_sm.txt b/Documentation/devicetree/bindings/firmware/meson/meson_sm.txt
deleted file mode 100644
index c248cd44f727..000000000000
--- a/Documentation/devicetree/bindings/firmware/meson/meson_sm.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-* Amlogic Secure Monitor
-
-In the Amlogic SoCs the Secure Monitor code is used to provide access to the
-NVMEM, enable JTAG, set USB boot, etc...
-
-Required properties for the secure monitor node:
-- compatible: Should be "amlogic,meson-gxbb-sm"
-
-Example:
-
- firmware {
- sm: secure-monitor {
- compatible = "amlogic,meson-gxbb-sm";
- };
- };
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
index 25688571ee7c..a66e99812b1f 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
@@ -38,6 +38,8 @@ properties:
- qcom,scm-msm8994
- qcom,scm-msm8996
- qcom,scm-msm8998
+ - qcom,scm-qdu1000
+ - qcom,scm-sa8775p
- qcom,scm-sc7180
- qcom,scm-sc7280
- qcom,scm-sc8280xp
@@ -53,6 +55,7 @@ properties:
- qcom,scm-sm8250
- qcom,scm-sm8350
- qcom,scm-sm8450
+ - qcom,scm-sm8550
- qcom,scm-qcs404
- const: qcom,scm
@@ -73,6 +76,12 @@ properties:
'#reset-cells':
const: 1
+ interrupts:
+ description:
+ The wait-queue interrupt that firmware raises as part of the handshake
+ protocol to handle sleeping SCM calls.
+ maxItems: 1
+
qcom,dload-mode:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
@@ -82,6 +91,32 @@ properties:
description: TCSR hardware block
allOf:
+ # Clocks
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,scm-apq8064
+ - qcom,scm-apq8084
+ - qcom,scm-mdm9607
+ - qcom,scm-msm8226
+ - qcom,scm-msm8660
+ - qcom,scm-msm8916
+ - qcom,scm-msm8953
+ - qcom,scm-msm8960
+ - qcom,scm-msm8974
+ - qcom,scm-msm8976
+ - qcom,scm-sm6375
+ then:
+ required:
+ - clocks
+ - clock-names
+ else:
+ properties:
+ clock-names: false
+ clocks: false
+
- if:
properties:
compatible:
@@ -100,10 +135,6 @@ allOf:
clocks:
maxItems: 1
- required:
- - clocks
- - clock-names
-
- if:
properties:
compatible:
@@ -111,6 +142,7 @@ allOf:
enum:
- qcom,scm-apq8084
- qcom,scm-mdm9607
+ - qcom,scm-msm8226
- qcom,scm-msm8916
- qcom,scm-msm8953
- qcom,scm-msm8974
@@ -127,9 +159,31 @@ allOf:
minItems: 3
maxItems: 3
- required:
- - clocks
- - clock-names
+ # Interconnects
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,scm-sm8450
+ - qcom,scm-sm8550
+ then:
+ properties:
+ interconnects: false
+
+ # Interrupts
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,scm-sm8450
+ - qcom,scm-sm8550
+ then:
+ properties:
+ interrupts: false
required:
- compatible
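For reference, a hedged sketch of an SCM node on one of the two platforms now allowed to carry the wait-queue interrupt; the GIC SPI number is illustrative (not taken from this patch) and the usual interrupt-controller macros are assumed to be included by the surrounding DTS:

    firmware {
        scm {
            compatible = "qcom,scm-sm8450", "qcom,scm";
            interrupts = <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>;
        };
    };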
diff --git a/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml b/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml
index 2f72ec418415..037c3ae9f1c3 100644
--- a/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml
+++ b/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml
@@ -26,6 +26,7 @@ properties:
enum:
- mediatek,mt6323-keys
- mediatek,mt6331-keys
+ - mediatek,mt6357-keys
- mediatek,mt6358-keys
- mediatek,mt6397-keys
diff --git a/Documentation/devicetree/bindings/leds/leds-mt6323.txt b/Documentation/devicetree/bindings/leds/leds-mt6323.txt
index 45bf9f7d85f3..73353692efa1 100644
--- a/Documentation/devicetree/bindings/leds/leds-mt6323.txt
+++ b/Documentation/devicetree/bindings/leds/leds-mt6323.txt
@@ -9,7 +9,7 @@ MT6323 PMIC hardware.
For MT6323 MFD bindings see:
Documentation/devicetree/bindings/mfd/mt6397.txt
For MediaTek PMIC wrapper bindings see:
-Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
+Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml
Required properties:
- compatible : Must be "mediatek,mt6323-led"
diff --git a/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml b/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml
new file mode 100644
index 000000000000..837a77013d57
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/mediatek,mt6357.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT6357 PMIC
+
+maintainers:
+ - Flora Fu <flora.fu@mediatek.com>
+ - Alexandre Mergnat <amergnat@baylibre.com>
+
+description: |
+ MT6357 is a power management system chip containing 5 buck
+ converters and 29 LDOs. Supported features are audio codec,
+ USB battery charging, fuel gauge and RTC.
+
+ This is a multifunction device with the following sub modules:
+ - Regulator
+ - RTC
+ - Keys
+
+ It is interfaced to the host controller over SPI through a proprietary piece of
+ hardware called the PMIC wrapper, or pwrap. This MFD is a child device of pwrap.
+ See the following for pwrap node definitions:
+ Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml
+
+properties:
+ compatible:
+ const: mediatek,mt6357
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ regulators:
+ type: object
+ $ref: /schemas/regulator/mediatek,mt6357-regulator.yaml
+ description:
+ List of MT6357 BUCKs and LDOs regulators.
+
+ rtc:
+ type: object
+ $ref: /schemas/rtc/rtc.yaml#
+ description:
+ MT6357 Real Time Clock.
+ properties:
+ compatible:
+ const: mediatek,mt6357-rtc
+ start-year: true
+ required:
+ - compatible
+
+ keys:
+ type: object
+ $ref: /schemas/input/mediatek,pmic-keys.yaml
+ description:
+ MT6357 power and home keys.
+
+required:
+ - compatible
+ - regulators
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pwrap {
+ pmic {
+ compatible = "mediatek,mt6357";
+
+ interrupt-parent = <&pio>;
+ interrupts = <145 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ regulators {
+ mt6357_vproc_reg: buck-vproc {
+ regulator-name = "vproc";
+ regulator-min-microvolt = <518750>;
+ regulator-max-microvolt = <1312500>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <220>;
+ regulator-always-on;
+ };
+
+ // ...
+
+ mt6357_vusb33_reg: ldo-vusb33 {
+ regulator-name = "vusb33";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-enable-ramp-delay = <264>;
+ };
+ };
+
+ rtc {
+ compatible = "mediatek,mt6357-rtc";
+ };
+
+ keys {
+ compatible = "mediatek,mt6357-keys";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
index 518986c44880..294693a8906c 100644
--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -13,7 +13,7 @@ MT6397/MT6323 is a multifunction device with the following sub modules:
It is interfaced to host controller using SPI interface by a proprietary hardware
called PMIC wrapper or pwrap. MT6397/MT6323 MFD is a child device of pwrap.
See the following for pwarp node definitions:
-../soc/mediatek/pwrap.txt
+../soc/mediatek/mediatek,pwrap.yaml
This document describes the binding for MFD device and its sub module.
diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
deleted file mode 100644
index b823b8625243..000000000000
--- a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
+++ /dev/null
@@ -1,283 +0,0 @@
-Qualcomm Resource Power Manager (RPM)
-
-This driver is used to interface with the Resource Power Manager (RPM) found in
-various Qualcomm platforms. The RPM allows each component in the system to vote
-for state of the system resources, such as clocks, regulators and bus
-frequencies.
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,rpm-apq8064"
- "qcom,rpm-msm8660"
- "qcom,rpm-msm8960"
- "qcom,rpm-ipq8064"
- "qcom,rpm-mdm9615"
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: base address and size of the RPM's message ram
-
-- interrupts:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: three entries specifying the RPM's:
- 1. acknowledgement interrupt
- 2. error interrupt
- 3. wakeup interrupt
-
-- interrupt-names:
- Usage: required
- Value type: <string-array>
- Definition: must be the three strings "ack", "err" and "wakeup", in order
-
-- qcom,ipc:
- Usage: required
- Value type: <prop-encoded-array>
-
- Definition: three entries specifying the outgoing ipc bit used for
- signaling the RPM:
- - phandle to a syscon node representing the apcs registers
- - u32 representing offset to the register within the syscon
- - u32 representing the ipc bit within the register
-
-
-= SUBNODES
-
-The RPM exposes resources to its subnodes. The below bindings specify the set
-of valid subnodes that can operate on these resources.
-
-== Regulators
-
-Regulator nodes are identified by their compatible:
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,rpm-pm8058-regulators"
- "qcom,rpm-pm8901-regulators"
- "qcom,rpm-pm8921-regulators"
- "qcom,rpm-pm8018-regulators"
- "qcom,rpm-smb208-regulators"
-
-- vdd_l0_l1_lvs-supply:
-- vdd_l2_l11_l12-supply:
-- vdd_l3_l4_l5-supply:
-- vdd_l6_l7-supply:
-- vdd_l8-supply:
-- vdd_l9-supply:
-- vdd_l10-supply:
-- vdd_l13_l16-supply:
-- vdd_l14_l15-supply:
-- vdd_l17_l18-supply:
-- vdd_l19_l20-supply:
-- vdd_l21-supply:
-- vdd_l22-supply:
-- vdd_l23_l24_l25-supply:
-- vdd_ncp-supply:
-- vdd_s0-supply:
-- vdd_s1-supply:
-- vdd_s2-supply:
-- vdd_s3-supply:
-- vdd_s4-supply:
- Usage: optional (pm8058 only)
- Value type: <phandle>
- Definition: reference to regulator supplying the input pin, as
- described in the data sheet
-
-- lvs0_in-supply:
-- lvs1_in-supply:
-- lvs2_in-supply:
-- lvs3_in-supply:
-- mvs_in-supply:
-- vdd_l0-supply:
-- vdd_l1-supply:
-- vdd_l2-supply:
-- vdd_l3-supply:
-- vdd_l4-supply:
-- vdd_l5-supply:
-- vdd_l6-supply:
-- vdd_s0-supply:
-- vdd_s1-supply:
-- vdd_s2-supply:
-- vdd_s3-supply:
-- vdd_s4-supply:
- Usage: optional (pm8901 only)
- Value type: <phandle>
- Definition: reference to regulator supplying the input pin, as
- described in the data sheet
-
-- vdd_l1_l2_l12_l18-supply:
-- vdd_l3_l15_l17-supply:
-- vdd_l4_l14-supply:
-- vdd_l5_l8_l16-supply:
-- vdd_l6_l7-supply:
-- vdd_l9_l11-supply:
-- vdd_l10_l22-supply:
-- vdd_l21_l23_l29-supply:
-- vdd_l24-supply:
-- vdd_l25-supply:
-- vdd_l26-supply:
-- vdd_l27-supply:
-- vdd_l28-supply:
-- vdd_ncp-supply:
-- vdd_s1-supply:
-- vdd_s2-supply:
-- vdd_s4-supply:
-- vdd_s5-supply:
-- vdd_s6-supply:
-- vdd_s7-supply:
-- vdd_s8-supply:
-- vin_5vs-supply:
-- vin_lvs1_3_6-supply:
-- vin_lvs2-supply:
-- vin_lvs4_5_7-supply:
- Usage: optional (pm8921 only)
- Value type: <phandle>
- Definition: reference to regulator supplying the input pin, as
- described in the data sheet
-
-- vin_lvs1-supply:
-- vdd_l7-supply:
-- vdd_l8-supply:
-- vdd_l9_l10_l11_l12-supply:
- Usage: optional (pm8018 only)
- Value type: <phandle>
- Definition: reference to regulator supplying the input pin, as
- described in the data sheet
-
-The regulator node houses sub-nodes for each regulator within the device. Each
-sub-node is identified using the node's name, with valid values listed for each
-of the pmics below.
-
-pm8058:
- l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15,
- l16, l17, l18, l19, l20, l21, l22, l23, l24, l25, s0, s1, s2, s3, s4,
- lvs0, lvs1, ncp
-
-pm8901:
- l0, l1, l2, l3, l4, l5, l6, s0, s1, s2, s3, s4, lvs0, lvs1, lvs2, lvs3,
- mvs
-
-pm8921:
- s1, s2, s3, s4, s7, s8, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
- l12, l14, l15, l16, l17, l18, l21, l22, l23, l24, l25, l26, l27, l28,
- l29, lvs1, lvs2, lvs3, lvs4, lvs5, lvs6, lvs7, usb-switch, hdmi-switch,
- ncp
-
-pm8018:
- s1, s2, s3, s4, s5, , l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
- l12, l14, lvs1
-
-smb208:
- s1a, s1b, s2a, s2b
-
-The content of each sub-node is defined by the standard binding for regulators -
-see regulator.txt - with additional custom properties described below:
-
-=== Switch-mode Power Supply regulator custom properties
-
-- bias-pull-down:
- Usage: optional
- Value type: <empty>
- Definition: enable pull down of the regulator when inactive
-
-- qcom,switch-mode-frequency:
- Usage: required
- Value type: <u32>
- Definition: Frequency (Hz) of the switch-mode power supply;
- must be one of:
- 19200000, 9600000, 6400000, 4800000, 3840000, 3200000,
- 2740000, 2400000, 2130000, 1920000, 1750000, 1600000,
- 1480000, 1370000, 1280000, 1200000
-
-- qcom,force-mode:
- Usage: optional (default if no other qcom,force-mode is specified)
- Value type: <u32>
- Definition: indicates that the regulator should be forced to a
- particular mode, valid values are:
- QCOM_RPM_FORCE_MODE_NONE - do not force any mode
- QCOM_RPM_FORCE_MODE_LPM - force into low power mode
- QCOM_RPM_FORCE_MODE_HPM - force into high power mode
- QCOM_RPM_FORCE_MODE_AUTO - allow regulator to automatically
- select its own mode based on
- realtime current draw, only for:
- pm8921 smps and ftsmps
-
-- qcom,power-mode-hysteretic:
- Usage: optional
- Value type: <empty>
- Definition: select that the power supply should operate in hysteretic
- mode, instead of the default pwm mode
-
-=== Low-dropout regulator custom properties
-
-- bias-pull-down:
- Usage: optional
- Value type: <empty>
- Definition: enable pull down of the regulator when inactive
-
-- qcom,force-mode:
- Usage: optional
- Value type: <u32>
- Definition: indicates that the regulator should not be forced to any
- particular mode, valid values are:
- QCOM_RPM_FORCE_MODE_NONE - do not force any mode
- QCOM_RPM_FORCE_MODE_LPM - force into low power mode
- QCOM_RPM_FORCE_MODE_HPM - force into high power mode
- QCOM_RPM_FORCE_MODE_BYPASS - set regulator to use bypass
- mode, i.e. to act as a switch
- and not regulate, only for:
- pm8921 pldo, nldo and nldo1200
-
-=== Negative Charge Pump custom properties
-
-- qcom,switch-mode-frequency:
- Usage: required
- Value type: <u32>
- Definition: Frequency (Hz) of the switch mode power supply;
- must be one of:
- 19200000, 9600000, 6400000, 4800000, 3840000, 3200000,
- 2740000, 2400000, 2130000, 1920000, 1750000, 1600000,
- 1480000, 1370000, 1280000, 1200000
-
-= EXAMPLE
-
- #include <dt-bindings/mfd/qcom-rpm.h>
-
- rpm@108000 {
- compatible = "qcom,rpm-msm8960";
- reg = <0x108000 0x1000>;
- qcom,ipc = <&apcs 0x8 2>;
-
- interrupts = <0 19 0>, <0 21 0>, <0 22 0>;
- interrupt-names = "ack", "err", "wakeup";
-
- regulators {
- compatible = "qcom,rpm-pm8921-regulators";
- vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
-
- s1 {
- regulator-min-microvolt = <1225000>;
- regulator-max-microvolt = <1225000>;
-
- bias-pull-down;
-
- qcom,switch-mode-frequency = <3200000>;
- };
-
- pm8921_s4: s4 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
-
- qcom,switch-mode-frequency = <1600000>;
- bias-pull-down;
-
- qcom,force-mode = <QCOM_RPM_FORCE_MODE_AUTO>;
- };
- };
- };
-
diff --git a/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
index b03b2f00cc5b..3bee3f8733f7 100644
--- a/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
@@ -22,13 +22,13 @@ properties:
samsung,pmu-syscon:
$ref: /schemas/types.yaml#/definitions/phandle
+ deprecated: true
description:
- Phandle to PMU system controller interface.
+ Phandle to PMU system controller interface (if not a child of PMU).
required:
- compatible
- "#phy-cells"
- - samsung,pmu-syscon
additionalProperties: false
@@ -36,6 +36,5 @@ examples:
- |
phy {
compatible = "samsung,exynos5420-dp-video-phy";
- samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
index 415440aaad89..b2250e4a6b1b 100644
--- a/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
@@ -35,15 +35,18 @@ properties:
syscon:
$ref: /schemas/types.yaml#/definitions/phandle
+ deprecated: true
description:
Phandle to PMU system controller interface, valid only for
- samsung,s5pv210-mipi-video-phy and samsung,exynos5420-mipi-video-phy.
+ samsung,s5pv210-mipi-video-phy and samsung,exynos5420-mipi-video-phy (if
+ not a child of PMU).
samsung,pmu-syscon:
$ref: /schemas/types.yaml#/definitions/phandle
+ deprecated: true
description:
Phandle to PMU system controller interface, valid for
- samsung,exynos5433-mipi-video-phy.
+ samsung,exynos5433-mipi-video-phy (if not a child of PMU).
samsung,disp-sysreg:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -81,13 +84,10 @@ allOf:
samsung,disp-sysreg: false
samsung,cam0-sysreg: false
samsung,cam1-sysreg: false
- required:
- - syscon
else:
properties:
syscon: false
required:
- - samsung,pmu-syscon
- samsung,disp-sysreg
- samsung,cam0-sysreg
- samsung,cam1-sysreg
@@ -99,7 +99,6 @@ examples:
phy {
compatible = "samsung,exynos5433-mipi-video-phy";
#phy-cells = <1>;
- samsung,pmu-syscon = <&pmu_system_controller>;
samsung,cam0-sysreg = <&syscon_cam0>;
samsung,cam1-sysreg = <&syscon_cam1>;
samsung,disp-sysreg = <&syscon_disp>;
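The effect of deprecating the syscon/samsung,pmu-syscon phandles in both PHY bindings above is that these PHYs are now expected to be described as children of the PMU system controller node instead, roughly as in this sketch; the PMU compatible string, unit address and register size are assumptions for illustration, not part of this patch:

    system-controller@10040000 {
        compatible = "samsung,exynos5433-pmu", "syscon", "simple-mfd";
        reg = <0x10040000 0x5000>;

        mipi-video-phy {
            compatible = "samsung,exynos5433-mipi-video-phy";
            #phy-cells = <1>;
            samsung,cam0-sysreg = <&syscon_cam0>;
            samsung,cam1-sysreg = <&syscon_cam1>;
            samsung,disp-sysreg = <&syscon_disp>;
        };
    };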
diff --git a/Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml b/Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml
new file mode 100644
index 000000000000..46e2647a5d72
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/allwinner,sun20i-d1-ppu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner SoCs PPU power domain controller
+
+maintainers:
+ - Samuel Holland <samuel@sholland.org>
+
+description:
+ D1 and related SoCs contain a power domain controller for the CPUs, GPU, and
+ video-related hardware.
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun20i-d1-ppu
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: Bus Clock
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ '#power-domain-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun20i-d1-r-ccu.h>
+ #include <dt-bindings/reset/sun20i-d1-r-ccu.h>
+
+ ppu: power-controller@7001000 {
+ compatible = "allwinner,sun20i-d1-ppu";
+ reg = <0x7001000 0x1000>;
+ clocks = <&r_ccu CLK_BUS_R_PPU>;
+ resets = <&r_ccu RST_BUS_R_PPU>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt b/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
index 99b5b10cda31..ba5865ae6bfe 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
@@ -1,5 +1,5 @@
-Amlogic Meson Power Controller
-==============================
+Amlogic Meson Power Controller (deprecated)
+===========================================
The Amlogic Meson SoCs embeds an internal Power domain controller.
diff --git a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
index 605ec7ab5f63..c9acef80f452 100644
--- a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
+++ b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
@@ -28,6 +28,7 @@ properties:
- mediatek,mt8173-power-controller
- mediatek,mt8183-power-controller
- mediatek,mt8186-power-controller
+ - mediatek,mt8188-power-controller
- mediatek,mt8192-power-controller
- mediatek,mt8195-power-controller
@@ -84,6 +85,7 @@ $defs:
"include/dt-bindings/power/mt8167-power.h" - for MT8167 type power domain.
"include/dt-bindings/power/mt8173-power.h" - for MT8173 type power domain.
"include/dt-bindings/power/mt8183-power.h" - for MT8183 type power domain.
+ "include/dt-bindings/power/mediatek,mt8188-power.h" - for MT8188 type power domain.
"include/dt-bindings/power/mt8192-power.h" - for MT8192 type power domain.
"include/dt-bindings/power/mt8195-power.h" - for MT8195 type power domain.
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index 633d49884019..afad3135ed67 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -30,6 +30,7 @@ properties:
- qcom,qcs404-rpmpd
- qcom,qdu1000-rpmhpd
- qcom,sa8540p-rpmhpd
+ - qcom,sa8775p-rpmhpd
- qcom,sdm660-rpmpd
- qcom,sc7180-rpmhpd
- qcom,sc7280-rpmhpd
@@ -39,7 +40,6 @@ properties:
- qcom,sdm845-rpmhpd
- qcom,sdx55-rpmhpd
- qcom,sdx65-rpmhpd
- - qcom,sm4250-rpmpd
- qcom,sm6115-rpmpd
- qcom,sm6125-rpmpd
- qcom,sm6350-rpmhpd
diff --git a/Documentation/devicetree/bindings/power/starfive,jh7110-pmu.yaml b/Documentation/devicetree/bindings/power/starfive,jh7110-pmu.yaml
new file mode 100644
index 000000000000..98eb8b4110e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/starfive,jh7110-pmu.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/starfive,jh7110-pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive JH7110 Power Management Unit
+
+maintainers:
+ - Walker Chen <walker.chen@starfivetech.com>
+
+description: |
+ StarFive JH7110 SoC includes support for multiple power domains which can be
+ powered on/off by software based on different application scenarios to save power.
+
+properties:
+ compatible:
+ enum:
+ - starfive,jh7110-pmu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#power-domain-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#power-domain-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ pwrc: power-controller@17030000 {
+ compatible = "starfive,jh7110-pmu";
+ reg = <0x17030000 0x10000>;
+ interrupts = <111>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml b/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml
index 2998f1c8f0db..08eb10c25821 100644
--- a/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml
+++ b/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml
@@ -27,9 +27,11 @@ properties:
identifier of the client to use this region for buffers
qcom,vmid:
- $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32-array
description: >
- vmid of the remote processor, to set up memory protection
+ Array of vmids of the remote processors, to set up memory protection
+ minItems: 1
+ maxItems: 2
required:
- qcom,client-id
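With qcom,vmid now an array, a single rmtfs region can be shared with up to two remote VMs. A minimal sketch of such a node; the address, size, client ID and VMID values are placeholders, not taken from this patch:

    reserved-memory {
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;

        rmtfs_mem: memory@f5b01000 {
            compatible = "qcom,rmtfs-mem";
            reg = <0x0 0xf5b01000 0x0 0x600000>;
            no-map;

            qcom,client-id = <1>;
            qcom,vmid = <15 43>;    /* two remote VM ids, illustrative values */
        };
    };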
diff --git a/Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml b/Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml
index 0551a0d1b3df..eb6ab73c0f31 100644
--- a/Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml
@@ -38,6 +38,10 @@ properties:
- sifive,fu740-c000-ccache
- const: cache
- items:
+ - const: starfive,jh7110-ccache
+ - const: sifive,ccache0
+ - const: cache
+ - items:
- const: microchip,mpfs-ccache
- const: sifive,fu540-c000-ccache
- const: cache
@@ -84,6 +88,7 @@ allOf:
contains:
enum:
- sifive,fu740-c000-ccache
+ - starfive,jh7110-ccache
- microchip,mpfs-ccache
then:
@@ -104,7 +109,9 @@ allOf:
properties:
compatible:
contains:
- const: sifive,fu740-c000-ccache
+ enum:
+ - sifive,fu740-c000-ccache
+ - starfive,jh7110-ccache
then:
properties:
diff --git a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
index 9241e5fc7cff..15c133cac315 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
+++ b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mutex.yaml
@@ -32,6 +32,7 @@ properties:
- mediatek,mt8183-disp-mutex
- mediatek,mt8186-disp-mutex
- mediatek,mt8186-mdp3-mutex
+ - mediatek,mt8188-disp-mutex
- mediatek,mt8192-disp-mutex
- mediatek,mt8195-disp-mutex
diff --git a/Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml b/Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml
new file mode 100644
index 000000000000..3fefd634bc69
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml
@@ -0,0 +1,147 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/mediatek/mediatek,pwrap.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek PMIC Wrapper
+
+maintainers:
+ - Flora Fu <flora.fu@mediatek.com>
+ - Alexandre Mergnat <amergnat@baylibre.com>
+
+description:
+ On MediaTek SoCs the PMIC is connected via SPI. The SPI master interface
+ is not directly visible to the CPU, but only through the PMIC wrapper
+ inside the SoC. The communication between the SoC and the PMIC can
+ optionally be encrypted. Also a non-standard Dual IO SPI mode can be
+ used to increase speed.
+
+ IP Pairing
+
+ On MT8135 the pins of some SoC internal peripherals can be on the PMIC.
+ The signals of these pins are routed over the SPI bus using the pwrap
+ bridge. In the binding description below the properties needed for bridging
+ are marked with "IP Pairing". These are optional on SoCs which do not support
+ IP Pairing.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - mediatek,mt2701-pwrap
+ - mediatek,mt6765-pwrap
+ - mediatek,mt6779-pwrap
+ - mediatek,mt6797-pwrap
+ - mediatek,mt6873-pwrap
+ - mediatek,mt7622-pwrap
+ - mediatek,mt8135-pwrap
+ - mediatek,mt8173-pwrap
+ - mediatek,mt8183-pwrap
+ - mediatek,mt8186-pwrap
+ - mediatek,mt8188-pwrap
+ - mediatek,mt8195-pwrap
+ - mediatek,mt8365-pwrap
+ - mediatek,mt8516-pwrap
+ - items:
+ - enum:
+ - mediatek,mt8186-pwrap
+ - mediatek,mt8195-pwrap
+ - const: syscon
+
+ reg:
+ minItems: 1
+ items:
+ - description: PMIC wrapper registers
+ - description: IP pairing registers
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: pwrap
+ - const: pwrap-bridge
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ items:
+ - description: SPI bus clock
+ - description: Main module clock
+ - description: System module clock
+ - description: Timer module clock
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: spi
+ - const: wrap
+ - const: sys
+ - const: tmr
+
+ resets:
+ minItems: 1
+ items:
+ - description: PMIC wrapper reset
+ - description: IP pairing reset
+
+ reset-names:
+ minItems: 1
+ items:
+ - const: pwrap
+ - const: pwrap-bridge
+
+ pmic:
+ type: object
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+
+dependentRequired:
+ resets: [reset-names]
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: mediatek,mt8365-pwrap
+ then:
+ properties:
+ clocks:
+ minItems: 4
+
+ clock-names:
+ minItems: 4
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/mt8135-resets.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pwrap@1000f000 {
+ compatible = "mediatek,mt8135-pwrap";
+ reg = <0 0x1000f000 0 0x1000>,
+ <0 0x11017000 0 0x1000>;
+ reg-names = "pwrap", "pwrap-bridge";
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk26m>, <&clk26m>;
+ clock-names = "spi", "wrap";
+ resets = <&infracfg MT8135_INFRA_PMIC_WRAP_RST>,
+ <&pericfg MT8135_PERI_PWRAP_BRIDGE_SW_RST>;
+ reset-names = "pwrap", "pwrap-bridge";
+ };
+ };
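For the mt8365 case covered by the allOf clause in this schema, all four clocks become mandatory; a hedged sketch of such a node (the unit address, interrupt number and clock phandles are placeholders, not taken from this patch):

    pwrap@1000d000 {
        compatible = "mediatek,mt8365-pwrap";
        reg = <0 0x1000d000 0 0x1000>;
        reg-names = "pwrap";
        interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clk_spi>, <&clk_wrap>, <&clk_sys>, <&clk_tmr>;
        clock-names = "spi", "wrap", "sys", "tmr";
    };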
diff --git a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
deleted file mode 100644
index 12e4b4260b40..000000000000
--- a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-MediaTek PMIC Wrapper Driver
-
-This document describes the binding for the MediaTek PMIC wrapper.
-
-On MediaTek SoCs the PMIC is connected via SPI. The SPI master interface
-is not directly visible to the CPU, but only through the PMIC wrapper
-inside the SoC. The communication between the SoC and the PMIC can
-optionally be encrypted. Also a non standard Dual IO SPI mode can be
-used to increase speed.
-
-IP Pairing
-
-on MT8135 the pins of some SoC internal peripherals can be on the PMIC.
-The signals of these pins are routed over the SPI bus using the pwrap
-bridge. In the binding description below the properties needed for bridging
-are marked with "IP Pairing". These are optional on SoCs which do not support
-IP Pairing
-
-Required properties in pwrap device node.
-- compatible:
- "mediatek,mt2701-pwrap" for MT2701/7623 SoCs
- "mediatek,mt6765-pwrap" for MT6765 SoCs
- "mediatek,mt6779-pwrap" for MT6779 SoCs
- "mediatek,mt6797-pwrap" for MT6797 SoCs
- "mediatek,mt6873-pwrap" for MT6873/8192 SoCs
- "mediatek,mt7622-pwrap" for MT7622 SoCs
- "mediatek,mt8135-pwrap" for MT8135 SoCs
- "mediatek,mt8173-pwrap" for MT8173 SoCs
- "mediatek,mt8183-pwrap" for MT8183 SoCs
- "mediatek,mt8186-pwrap" for MT8186 SoCs
- "mediatek,mt8188-pwrap", "mediatek,mt8195-pwrap" for MT8188 SoCs
- "mediatek,mt8195-pwrap" for MT8195 SoCs
- "mediatek,mt8365-pwrap" for MT8365 SoCs
- "mediatek,mt8516-pwrap" for MT8516 SoCs
-- interrupts: IRQ for pwrap in SOC
-- reg-names: "pwrap" is required; "pwrap-bridge" is optional.
- "pwrap": Main registers base
- "pwrap-bridge": bridge base (IP Pairing)
-- reg: Must contain an entry for each entry in reg-names.
-- clock-names: Must include the following entries:
- "spi": SPI bus clock
- "wrap": Main module clock
- "sys": Optional system module clock
- "tmr": Optional timer module clock
-- clocks: Must contain an entry for each entry in clock-names.
-
-Optional properities:
-- reset-names: Some SoCs include the following entries:
- "pwrap"
- "pwrap-bridge" (IP Pairing)
-- resets: Must contain an entry for each entry in reset-names.
-- pmic: Using either MediaTek PMIC MFD as the child device of pwrap
- See the following for child node definitions:
- Documentation/devicetree/bindings/mfd/mt6397.txt
- or the regulator-only device as the child device of pwrap, such as MT6380.
- See the following definitions for such kinds of devices.
- Documentation/devicetree/bindings/regulator/mt6380-regulator.txt
-
-Example:
- pwrap: pwrap@1000f000 {
- compatible = "mediatek,mt8135-pwrap";
- reg = <0 0x1000f000 0 0x1000>,
- <0 0x11017000 0 0x1000>;
- reg-names = "pwrap", "pwrap-bridge";
- interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&infracfg MT8135_INFRA_PMIC_WRAP_RST>,
- <&pericfg MT8135_PERI_PWRAP_BRIDGE_SW_RST>;
- reset-names = "pwrap", "pwrap-bridge";
- clocks = <&clk26m>, <&clk26m>;
- clock-names = "spi", "wrap";
-
- pmic {
- compatible = "mediatek,mt6397";
- };
- };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,dcc.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,dcc.yaml
new file mode 100644
index 000000000000..ce7e20dd22c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,dcc.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/qcom/qcom,dcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Data Capture and Compare
+
+maintainers:
+ - Souradeep Chowdhury <quic_schowdhu@quicinc.com>
+
+description: |
+ DCC (Data Capture and Compare) is a DMA engine which is used to save
+ configuration data or system memory contents during catastrophic failure
+ or SW trigger. DCC is used to capture and store data for debugging purposes.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,sm8150-dcc
+ - qcom,sc7280-dcc
+ - qcom,sc7180-dcc
+ - qcom,sdm845-dcc
+ - const: qcom,dcc
+
+ reg:
+ items:
+ - description: DCC base
+ - description: DCC RAM base
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ dma@10a2000 {
+ compatible = "qcom,sm8150-dcc", "qcom,dcc";
+ reg = <0x010a2000 0x1000>,
+ <0x010ad000 0x2000>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,msm8976-ramp-controller.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,msm8976-ramp-controller.yaml
new file mode 100644
index 000000000000..aae9cf7b8caf
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,msm8976-ramp-controller.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/qcom/qcom,msm8976-ramp-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Ramp Controller
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+description:
+ The Ramp Controller is used to program the sequence ID for pulse
+ swallowing, enable sequences and link Sequence IDs (SIDs) for the
+ CPU cores on some Qualcomm SoCs.
+
+properties:
+ compatible:
+ enum:
+ - qcom,msm8976-ramp-controller
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ cpu-power-controller@b014000 {
+ compatible = "qcom,msm8976-ramp-controller";
+ reg = <0x0b014000 0x68>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
new file mode 100644
index 000000000000..cf863683c21a
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/qcom/qcom,pmic-glink.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PMIC GLINK firmware interface for battery management, USB
+ Type-C and other things.
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+
+description:
+ The PMIC GLINK service runs on a coprocessor on some modern Qualcomm
+ platforms and implements USB Type-C handling and battery management. This
+ binding describes the component in the OS used to communicate with the
+ firmware and connect its resources to those described in the Devicetree,
+ particularly the USB Type-C controller's relationship with the USB and
+ DisplayPort components.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,sc8180x-pmic-glink
+ - qcom,sc8280xp-pmic-glink
+ - qcom,sm8350-pmic-glink
+ - const: qcom,pmic-glink
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^connector@\d$':
+ $ref: /schemas/connector/usb-connector.yaml#
+
+ properties:
+ reg: true
+
+ required:
+ - reg
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |+
+ pmic-glink {
+ compatible = "qcom,sc8280xp-pmic-glink", "qcom,pmic-glink";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ endpoint {
+ remote-endpoint = <&usb_role>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&ss_phy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ endpoint {
+ remote-endpoint = <&sbu_mux>;
+ };
+ };
+ };
+ };
+ };
+...
+
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml
new file mode 100644
index 000000000000..b00be9e01206
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,rpm.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/qcom/qcom,rpm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Resource Power Manager (RPM)
+
+description:
+ This driver is used to interface with the Resource Power Manager (RPM) found
+ in various Qualcomm platforms. The RPM allows each component in the system
+ to vote for the state of system resources, such as clocks, regulators and bus
+ frequencies.
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,rpm-apq8064
+ - qcom,rpm-msm8660
+ - qcom,rpm-msm8960
+ - qcom,rpm-ipq8064
+ - qcom,rpm-mdm9615
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: ack
+ - const: err
+ - const: wakeup
+
+ qcom,ipc:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to a syscon node representing the APCS registers
+ - description: u32 representing offset to the register within the syscon
+ - description: u32 representing the ipc bit within the register
+ description:
+ Three entries specifying the outgoing ipc bit used for signaling the RPM.
+
+patternProperties:
+ "^regulators(-[01])?$":
+ type: object
+ $ref: /schemas/regulator/qcom,rpm-regulator.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - qcom,ipc
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/mfd/qcom-rpm.h>
+
+ rpm@108000 {
+ compatible = "qcom,rpm-msm8960";
+ reg = <0x108000 0x1000>;
+ qcom,ipc = <&apcs 0x8 2>;
+
+ interrupts = <GIC_SPI 19 IRQ_TYPE_NONE>, <GIC_SPI 21 IRQ_TYPE_NONE>, <GIC_SPI 22 IRQ_TYPE_NONE>;
+ interrupt-names = "ack", "err", "wakeup";
+
+ regulators {
+ compatible = "qcom,rpm-pm8921-regulators";
+ vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+
+ s1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+
+ bias-pull-down;
+
+ qcom,switch-mode-frequency = <3200000>;
+ };
+
+ pm8921_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+
+ qcom,force-mode = <QCOM_RPM_FORCE_MODE_AUTO>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
index b246500d3d5d..a4046ba60846 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
@@ -112,8 +112,9 @@ properties:
$ref: /schemas/power/qcom,rpmpd.yaml#
patternProperties:
- '-regulators$':
+ '^regulators(-[0-9])?$':
$ref: /schemas/regulator/qcom,rpmh-regulator.yaml#
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
index 11c0f4dd797c..16fd67c0bd1f 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -80,6 +80,7 @@ if:
enum:
- qcom,rpm-apq8084
- qcom,rpm-msm8916
+ - qcom,rpm-msm8936
- qcom,rpm-msm8974
- qcom,rpm-msm8976
- qcom,rpm-msm8953
diff --git a/MAINTAINERS b/MAINTAINERS
index 70429a8e59bc..0b4edb98289b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17253,7 +17253,8 @@ F: drivers/clk/qcom/
F: include/dt-bindings/clock/qcom,*
QUALCOMM CORE POWER REDUCTION (CPR) AVS DRIVER
-M: Niklas Cassel <nks@flawful.org>
+M: Bjorn Andersson <andersson@kernel.org>
+M: Konrad Dybcio <konrad.dybcio@linaro.org>
L: linux-pm@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -19947,6 +19948,19 @@ F: Documentation/devicetree/bindings/reset/starfive,jh7100-reset.yaml
F: drivers/reset/reset-starfive-jh7100.c
F: include/dt-bindings/reset/starfive-jh7100.h
+STARFIVE JH71XX PMU CONTROLLER DRIVER
+M: Walker Chen <walker.chen@starfivetech.com>
+S: Supported
+F: Documentation/devicetree/bindings/power/starfive*
+F: drivers/soc/starfive/jh71xx_pmu.c
+F: include/dt-bindings/power/starfive,jh7110-pmu.h
+
+STARFIVE SOC DRIVERS
+M: Conor Dooley <conor@kernel.org>
+S: Maintained
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F: drivers/soc/starfive/
+
STARFIVE TRNG DRIVER
M: Jia Jie Ho <jiajie.ho@starfivetech.com>
S: Supported
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index 5d2f386a46d8..eca2fe0f4314 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -14,7 +14,7 @@
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <asm/smp_plat.h>
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 828c66bbaa67..2a6b4f676458 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -263,7 +263,6 @@ static int weim_parse_dt(struct platform_device *pdev)
static int weim_probe(struct platform_device *pdev)
{
struct weim_priv *priv;
- struct resource *res;
struct clk *clk;
void __iomem *base;
int ret;
@@ -273,8 +272,7 @@ static int weim_probe(struct platform_device *pdev)
return -ENOMEM;
/* get the resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 326bca154ac7..c6e2e91bb4c3 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <soc/qcom/spm.h>
#include <asm/proc-fns.h>
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index a14f65444b35..ea0f5083ac47 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -23,6 +23,38 @@ config ARM_SCMI_PROTOCOL
if ARM_SCMI_PROTOCOL
+config ARM_SCMI_NEED_DEBUGFS
+ bool
+ help
+ This declares whether at least one SCMI facility is configured
+ which needs debugfs support. When selected, it causes the creation
+ of a common SCMI debugfs root directory.
+
+config ARM_SCMI_RAW_MODE_SUPPORT
+ bool "Enable support for SCMI Raw transmission mode"
+ depends on DEBUG_FS
+ select ARM_SCMI_NEED_DEBUGFS
+ help
+ Enable support for SCMI Raw transmission mode.
+
+ If enabled, it allows the direct injection and snooping of SCMI bare
+ messages through a dedicated debugfs interface.
+ It is meant to be used by SCMI compliance/testing suites.
+
+ When enabled, regular SCMI driver interactions are inhibited in
+ order to avoid unexpected interactions with the SCMI Raw message
+ flow. If unsure, say N.
+
+config ARM_SCMI_RAW_MODE_SUPPORT_COEX
+ bool "Allow SCMI Raw mode coexistence with normal SCMI stack"
+ depends on ARM_SCMI_RAW_MODE_SUPPORT
+ help
+ Allow SCMI Raw transmission mode to coexist with normal SCMI stack.
+
+ This will allow regular SCMI drivers to register with the core and
+ operate normally, which could make an SCMI test suite that uses the
+ SCMI Raw mode support unreliable. If unsure, say N.
+
config ARM_SCMI_HAVE_TRANSPORT
bool
help
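
For reference, a configuration fragment (a sketch, not part of this patch) enabling the new Raw mode support would combine the symbols introduced above roughly as follows; ARM_SCMI_NEED_DEBUGFS is selected automatically and never set by hand:

    CONFIG_DEBUG_FS=y
    CONFIG_ARM_SCMI_PROTOCOL=y
    CONFIG_ARM_SCMI_RAW_MODE_SUPPORT=y
    # CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is not set
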
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ea86f8cc8f7..b31d78fa66cc 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
scmi-bus-y = bus.o
+scmi-core-objs := $(scmi-bus-y)
+
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
@@ -8,9 +11,11 @@ scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
-scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
- $(scmi-transport-y)
+scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
+
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
+
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index cc2eba067575..73140b854b31 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -7,8 +7,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
@@ -16,9 +18,174 @@
#include "common.h"
+BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
+EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
+
static DEFINE_IDA(scmi_bus_id);
-static DEFINE_IDR(scmi_protocols);
-static DEFINE_SPINLOCK(protocol_lock);
+
+static DEFINE_IDR(scmi_requested_devices);
+/* Protect access to scmi_requested_devices */
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+ const struct scmi_device_id *id_table;
+ struct list_head node;
+};
+
+/* Track globally the creation of SCMI SystemPower related devices */
+static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
+
+/**
+ * scmi_protocol_device_request - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper lets an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * at first the freshly requested @id_table is annotated in the IDR table
+ * @scmi_requested_devices and then the requested device is advertised to any
+ * registered party via the @scmi_requested_devices_nh notification chain.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+{
+ int ret = 0;
+ unsigned int id = 0;
+ struct list_head *head, *phead = NULL;
+ struct scmi_requested_dev *rdev;
+
+ pr_debug("Requesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
+ !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) {
+ pr_warn("SCMI Raw mode active. Rejecting '%s'/0x%02X\n",
+ id_table->name, id_table->protocol_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Search for the matching protocol rdev list and then search it for
+ * any existing device with the same name; fail if any duplicate is found.
+ */
+ mutex_lock(&scmi_requested_devices_mtx);
+ idr_for_each_entry(&scmi_requested_devices, head, id) {
+ if (!phead) {
+ /* A list found registered in the IDR is never empty */
+ rdev = list_first_entry(head, struct scmi_requested_dev,
+ node);
+ if (rdev->id_table->protocol_id ==
+ id_table->protocol_id)
+ phead = head;
+ }
+ list_for_each_entry(rdev, head, node) {
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
+ pr_err("Ignoring duplicate request [%d] %s\n",
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * No duplicate found for requested id_table, so let's create a new
+ * requested device entry for this new valid request.
+ */
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rdev->id_table = id_table;
+
+ /*
+ * Append the new requested device table descriptor to the head of the
+ * related protocol list, creating such a head if it is not already
+ * there.
+ */
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead) {
+ kfree(rdev);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(phead);
+
+ ret = idr_alloc(&scmi_requested_devices, (void *)phead,
+ id_table->protocol_id,
+ id_table->protocol_id + 1, GFP_KERNEL);
+ if (ret != id_table->protocol_id) {
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
+ kfree(rdev);
+ kfree(phead);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ }
+ list_add(&rdev->node, phead);
+
+out:
+ mutex_unlock(&scmi_requested_devices_mtx);
+
+ if (!ret)
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_REQUEST,
+ (void *)rdev->id_table);
+
+ return ret;
+}
+
+/**
+ * scmi_protocol_device_unrequest - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * The unrequested device, described by the provided id_table, is first
+ * removed from the IDR @scmi_requested_devices and then the removal is
+ * advertised to any registered party via the @scmi_requested_devices_nh
+ * notification chain.
+ */
+static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+ struct list_head *phead;
+
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ struct scmi_requested_dev *victim, *tmp;
+
+ list_for_each_entry_safe(victim, tmp, phead, node) {
+ if (!strcmp(victim->id_table->name, id_table->name)) {
+ list_del(&victim->node);
+
+ mutex_unlock(&scmi_requested_devices_mtx);
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_UNREQUEST,
+ (void *)victim->id_table);
+ kfree(victim);
+ mutex_lock(&scmi_requested_devices_mtx);
+ break;
+ }
+ }
+
+ if (list_empty(phead)) {
+ idr_remove(&scmi_requested_devices,
+ id_table->protocol_id);
+ kfree(phead);
+ }
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
+}
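
Both helpers above only record or drop the request and then advertise the change on the scmi_requested_devices_nh chain; a rough, purely illustrative sketch of a listener (the real consumer is the SCMI core in driver.c, registered as dev_req_nb later in this patch) could look like this:

    /* Hypothetical listener, shown only to illustrate the notification flow. */
    static int example_devreq_cb(struct notifier_block *nb,
                                 unsigned long action, void *data)
    {
            const struct scmi_device_id *id_table = data;

            switch (action) {
            case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
                    pr_debug("device requested: %s\n", id_table->name);
                    /* create matching devices on each active SCMI instance */
                    break;
            case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
                    pr_debug("device unrequested: %s\n", id_table->name);
                    /* destroy the matching devices */
                    break;
            }

            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_devreq_cb,
    };

    /* blocking_notifier_chain_register(&scmi_requested_devices_nh, &example_nb); */
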
static const struct scmi_device_id *
scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
@@ -58,11 +225,11 @@ static int scmi_match_by_id_table(struct device *dev, void *data)
struct scmi_device_id *id_table = data;
return sdev->protocol_id == id_table->protocol_id &&
- !strcmp(sdev->name, id_table->name);
+ (id_table->name && !strcmp(sdev->name, id_table->name));
}
-struct scmi_device *scmi_child_dev_find(struct device *parent,
- int prot_id, const char *name)
+static struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name)
{
struct scmi_device_id id_table;
struct device *dev;
@@ -77,30 +244,6 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
return to_scmi_dev(dev);
}
-const struct scmi_protocol *scmi_protocol_get(int protocol_id)
-{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
- if (!proto || !try_module_get(proto->owner)) {
- pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
- return NULL;
- }
-
- pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
-
- return proto;
-}
-
-void scmi_protocol_put(int protocol_id)
-{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
- if (proto)
- module_put(proto->owner);
-}
-
static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -121,12 +264,13 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
-static struct bus_type scmi_bus_type = {
+struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
};
+EXPORT_SYMBOL_GPL(scmi_bus_type);
int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
const char *mod_name)
@@ -147,7 +291,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
retval = driver_register(&driver->driver);
if (!retval)
- pr_debug("registered new scmi driver %s\n", driver->name);
+ pr_debug("Registered new scmi driver %s\n", driver->name);
return retval;
}
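
For context, the id_table handled by scmi_protocol_device_request() above originates from client drivers registering on this bus; a minimal, purely illustrative sketch of such a driver (all names hypothetical, following the pattern of existing SCMI client drivers) is:

    #include <linux/module.h>
    #include <linux/scmi_protocol.h>

    static int example_probe(struct scmi_device *sdev)
    {
            /* sdev->handle gives access to the SCMI protocol operations */
            return 0;
    }

    static void example_remove(struct scmi_device *sdev)
    {
    }

    static const struct scmi_device_id example_id_table[] = {
            { SCMI_PROTOCOL_CLOCK, "clocks" },
            { },
    };
    MODULE_DEVICE_TABLE(scmi, example_id_table);

    static struct scmi_driver example_driver = {
            .name = "example-scmi-client",
            .probe = example_probe,
            .remove = example_remove,
            .id_table = example_id_table,
    };
    module_scmi_driver(example_driver);

    MODULE_LICENSE("GPL");
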
@@ -165,13 +309,53 @@ static void scmi_device_release(struct device *dev)
kfree(to_scmi_dev(dev));
}
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
- const char *name)
+static void __scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+ pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(scmi_dev->dev.parent->of_node),
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+
+ if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
+ atomic_set(&scmi_syspower_registered, 0);
+
+ kfree_const(scmi_dev->name);
+ ida_free(&scmi_bus_id, scmi_dev->id);
+ device_unregister(&scmi_dev->dev);
+}
+
+static struct scmi_device *
+__scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
{
int id, retval;
struct scmi_device *scmi_dev;
+ /*
+ * If the same protocol/name device already exists under the same parent
+ * (i.e. SCMI instance) just return the existing device.
+ * This avoids any race between the SCMI driver, creating devices for
+ * each DT defined protocol at probe time, and the concurrent
+ * registration of SCMI drivers.
+ */
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ return scmi_dev;
+
+ /*
+ * Ignore any possible subsequent failures while creating the device
+ * since we are doomed anyway at that point; not using a mutex which
+ * spans across this whole function to keep things simple and to avoid
+ * serializing all the __scmi_device_create calls across possibly
+ * different SCMI server instances (parent).
+ */
+ if (protocol == SCMI_PROTOCOL_SYSTEM &&
+ atomic_cmpxchg(&scmi_syspower_registered, 0, 1)) {
+ dev_warn(parent,
+ "SCMI SystemPower protocol device must be unique !\n");
+ return NULL;
+ }
+
scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
if (!scmi_dev)
return NULL;
@@ -201,6 +385,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
if (retval)
goto put_dev;
+ pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ dev_name(&scmi_dev->dev), protocol, name);
+
return scmi_dev;
put_dev:
kfree_const(scmi_dev->name);
@@ -209,77 +397,85 @@ put_dev:
return NULL;
}
-void scmi_device_destroy(struct scmi_device *scmi_dev)
-{
- kfree_const(scmi_dev->name);
- scmi_handle_put(scmi_dev->handle);
- ida_free(&scmi_bus_id, scmi_dev->id);
- device_unregister(&scmi_dev->dev);
-}
-
-void scmi_device_link_add(struct device *consumer, struct device *supplier)
-{
- struct device_link *link;
-
- link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
-
- WARN_ON(!link);
-}
-
-void scmi_set_handle(struct scmi_device *scmi_dev)
-{
- scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
- if (scmi_dev->handle)
- scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
-}
-
-int scmi_protocol_register(const struct scmi_protocol *proto)
+/**
+ * scmi_device_create - A method to create one or more SCMI devices
+ *
+ * @np: A reference to the device node to use for the new device(s)
+ * @parent: The parent device to use identifying a specific SCMI instance
+ * @protocol: The SCMI protocol to be associated with this device
+ * @name: The requested name of the device to be created; this is optional
+ * and if no @name is provided, all the devices currently known to
+ * be requested on the SCMI bus for @protocol will be created.
+ *
+ * This method can be invoked to create a single well-defined device (like
+ * a transport device or a device requested by an SCMI driver loaded after
+ * the core SCMI stack has been probed), or to create all the devices currently
+ * known to have been requested by the loaded SCMI drivers for a specific
+ * protocol (typically during SCMI core protocol enumeration at probe time).
+ *
+ * Return: The created device (or one of them if @name was NOT provided and
+ * multiple devices were created) or NULL if no device was created;
+ * note that NULL indicates an error ONLY when a specific @name
+ * was provided: when no @name was provided, a number of devices
+ * could potentially have been created for the whole protocol, unless no
+ * device had been requested for that specific protocol.
+ */
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name)
{
- int ret;
-
- if (!proto) {
- pr_err("invalid protocol\n");
- return -EINVAL;
- }
-
- if (!proto->instance_init) {
- pr_err("missing init for protocol 0x%x\n", proto->id);
- return -EINVAL;
+ struct list_head *phead;
+ struct scmi_requested_dev *rdev;
+ struct scmi_device *scmi_dev = NULL;
+
+ if (name)
+ return __scmi_device_create(np, parent, protocol, name);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, protocol);
+ /* Nothing to do. */
+ if (!phead) {
+ mutex_unlock(&scmi_requested_devices_mtx);
+ return scmi_dev;
}
- spin_lock(&protocol_lock);
- ret = idr_alloc(&scmi_protocols, (void *)proto,
- proto->id, proto->id + 1, GFP_ATOMIC);
- spin_unlock(&protocol_lock);
- if (ret != proto->id) {
- pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
- proto->id, ret);
- return ret;
+ /* Walk the list of requested devices for protocol and create them */
+ list_for_each_entry(rdev, phead, node) {
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ /* Report errors and carry on... */
+ if (sdev)
+ scmi_dev = sdev;
+ else
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
}
+ mutex_unlock(&scmi_requested_devices_mtx);
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
-
- return 0;
+ return scmi_dev;
}
-EXPORT_SYMBOL_GPL(scmi_protocol_register);
+EXPORT_SYMBOL_GPL(scmi_device_create);
-void scmi_protocol_unregister(const struct scmi_protocol *proto)
+void scmi_device_destroy(struct device *parent, int protocol, const char *name)
{
- spin_lock(&protocol_lock);
- idr_remove(&scmi_protocols, proto->id);
- spin_unlock(&protocol_lock);
-
- pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+ struct scmi_device *scmi_dev;
- return;
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ __scmi_device_destroy(scmi_dev);
}
-EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+EXPORT_SYMBOL_GPL(scmi_device_destroy);
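
For illustration only (hypothetical call sites; driver.c later in this patch uses the API in exactly this way), the two calling conventions of the reworked creation path and the matching teardown are:

    /* Create one specific, named device under the given SCMI instance... */
    sdev = scmi_device_create(np, parent, SCMI_PROTOCOL_PERF, "cpufreq");

    /* ...or, with a NULL name, create every device currently requested
     * for that protocol across the loaded SCMI drivers. */
    sdev = scmi_device_create(np, parent, SCMI_PROTOCOL_PERF, NULL);

    /* Teardown is keyed by the same parent/protocol/name triplet. */
    scmi_device_destroy(parent, SCMI_PROTOCOL_PERF, "cpufreq");
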
static int __scmi_devices_unregister(struct device *dev, void *data)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- scmi_device_destroy(scmi_dev);
+ __scmi_device_destroy(scmi_dev);
return 0;
}
@@ -288,20 +484,33 @@ static void scmi_devices_unregister(void)
bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
}
-int __init scmi_bus_init(void)
+static int __init scmi_bus_init(void)
{
int retval;
retval = bus_register(&scmi_bus_type);
if (retval)
- pr_err("scmi protocol bus register failed (%d)\n", retval);
+ pr_err("SCMI protocol bus register failed (%d)\n", retval);
+
+ pr_info("SCMI protocol bus registered\n");
return retval;
}
+subsys_initcall(scmi_bus_init);
-void __exit scmi_bus_exit(void)
+static void __exit scmi_bus_exit(void)
{
+ /*
+ * Destroy all remaining devices: just in case the drivers were
+ * manually unbound at first and then the modules unloaded.
+ */
scmi_devices_unregister();
bus_unregister(&scmi_bus_type);
ida_destroy(&scmi_bus_id);
}
+module_exit(scmi_bus_exit);
+
+MODULE_ALIAS("scmi-core");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol bus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index a1c0154c31c6..c46dc5215af7 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -27,6 +27,48 @@
#include "protocols.h"
#include "notify.h"
+#define SCMI_MAX_CHANNELS 256
+
+#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+enum scmi_error_codes {
+ SCMI_SUCCESS = 0, /* Success */
+ SCMI_ERR_SUPPORT = -1, /* Not supported */
+ SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
+ SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
+ SCMI_ERR_ENTRY = -4, /* Not found */
+ SCMI_ERR_RANGE = -5, /* Value out of range */
+ SCMI_ERR_BUSY = -6, /* Device busy */
+ SCMI_ERR_COMMS = -7, /* Communication Error */
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+};
+
+static const int scmi_linux_errmap[] = {
+ /* better than switch case as long as return value is continuous */
+ 0, /* SCMI_SUCCESS */
+ -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
+ -EINVAL, /* SCMI_ERR_PARAM */
+ -EACCES, /* SCMI_ERR_ACCESS */
+ -ENOENT, /* SCMI_ERR_ENTRY */
+ -ERANGE, /* SCMI_ERR_RANGE */
+ -EBUSY, /* SCMI_ERR_BUSY */
+ -ECOMM, /* SCMI_ERR_COMMS */
+ -EIO, /* SCMI_ERR_GENERIC */
+ -EREMOTEIO, /* SCMI_ERR_HARDWARE */
+ -EPROTO, /* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+ int err_idx = -errno;
+
+ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+ return scmi_linux_errmap[err_idx];
+ return -EIO;
+}
+
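
As a quick, illustrative check of the mapping above (not part of the patch): a firmware status of SCMI_ERR_BUSY (-6) indexes scmi_linux_errmap[6] and comes back as -EBUSY, while anything outside the table falls back to -EIO:

    WARN_ON(scmi_to_linux_errno(SCMI_ERR_BUSY) != -EBUSY);
    WARN_ON(scmi_to_linux_errno(-42) != -EIO);
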
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
@@ -96,18 +138,19 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
-int scmi_handle_put(const struct scmi_handle *handle);
-void scmi_device_link_add(struct device *consumer, struct device *supplier);
-struct scmi_handle *scmi_handle_get(struct device *dev);
-void scmi_set_handle(struct scmi_device *scmi_dev);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-int __init scmi_bus_init(void);
-void __exit scmi_bus_exit(void);
+extern struct bus_type scmi_bus_type;
+
+#define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0
+#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1
+extern struct blocking_notifier_head scmi_requested_devices_nh;
-const struct scmi_protocol *scmi_protocol_get(int protocol_id);
-void scmi_protocol_put(int protocol_id);
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name);
+void scmi_device_destroy(struct device *parent, int protocol, const char *name);
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
@@ -116,6 +159,8 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
*
+ * @id: An identifier for this channel: this matches the protocol number
+ * used to initialize this channel
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @rx_timeout_ms: The configured RX timeout in milliseconds.
@@ -127,6 +172,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
+ int id;
struct device *dev;
unsigned int rx_timeout_ms;
struct scmi_handle *handle;
@@ -153,7 +199,7 @@ struct scmi_chan_info {
*/
struct scmi_transport_ops {
int (*link_supplier)(struct device *dev);
- bool (*chan_available)(struct device *dev, int idx);
+ bool (*chan_available)(struct device_node *of_node, int idx);
int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
bool tx);
int (*chan_free)(int id, void *p, void *data);
@@ -170,11 +216,6 @@ struct scmi_transport_ops {
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
-int scmi_protocol_device_request(const struct scmi_device_id *id_table);
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
-struct scmi_device *scmi_child_dev_find(struct device *parent,
- int prot_id, const char *name);
-
/**
* struct scmi_desc - Description of SoC integration
*
@@ -215,6 +256,36 @@ struct scmi_desc {
const bool atomic_enabled;
};
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return cinfo->no_completion_irq || desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
+{
+ return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return is_polling_required(cinfo, desc) &&
+ is_transport_polling_capable(desc);
+}
+
+void scmi_xfer_raw_put(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);
+
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms);
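
Taken together, the declarations above form the interface that the new Raw mode code drives; a rough sketch of one Raw transmission (not the actual raw_mode.c implementation; error handling omitted, with handle/info/timeout_ms assumed to come from the surrounding instance code) is:

    xfer = scmi_xfer_raw_get(handle);
    cinfo = scmi_xfer_raw_channel_get(handle, xfer->hdr.protocol_id);

    if (!scmi_xfer_raw_inflight_register(handle, xfer) &&
        !info->desc->ops->send_message(cinfo, xfer))
            scmi_xfer_raw_wait_for_message_response(cinfo, xfer, timeout_ms);

    scmi_xfer_raw_put(handle, xfer);
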
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
extern const struct scmi_desc scmi_mailbox_desc;
#endif
@@ -229,7 +300,6 @@ extern const struct scmi_desc scmi_optee_desc;
#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
/* shmem related declarations */
struct scmi_shared_mem;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index ffdad59ec81f..d21c7eafd641 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -14,7 +14,10 @@
* Copyright (C) 2018-2021 ARM Ltd.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bitmap.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
@@ -34,22 +37,15 @@
#include "common.h"
#include "notify.h"
+#include "raw_mode.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
-enum scmi_error_codes {
- SCMI_SUCCESS = 0, /* Success */
- SCMI_ERR_SUPPORT = -1, /* Not supported */
- SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
- SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
- SCMI_ERR_ENTRY = -4, /* Not found */
- SCMI_ERR_RANGE = -5, /* Value out of range */
- SCMI_ERR_BUSY = -6, /* Device busy */
- SCMI_ERR_COMMS = -7, /* Communication Error */
- SCMI_ERR_GENERIC = -8, /* Generic Error */
- SCMI_ERR_HARDWARE = -9, /* Hardware Error */
- SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-};
+static DEFINE_IDA(scmi_id);
+
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
@@ -58,18 +54,7 @@ static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
-static DEFINE_IDR(scmi_requested_devices);
-static DEFINE_MUTEX(scmi_requested_devices_mtx);
-
-/* Track globally the creation of SCMI SystemPower related devices */
-static bool scmi_syspower_registered;
-/* Protect access to scmi_syspower_registered */
-static DEFINE_MUTEX(scmi_syspower_mtx);
-
-struct scmi_requested_dev {
- const struct scmi_device_id *id_table;
- struct list_head node;
-};
+static struct dentry *scmi_top_dentry;
/**
* struct scmi_xfers_info - Structure to manage transfer information
@@ -118,8 +103,23 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+};
+
+/**
* struct scmi_info - Structure representing a SCMI instance
*
+ * @id: A sequence number starting from zero identifying this instance
* @dev: Device pointer
* @desc: SoC description for this instance
* @version: SCMI revision information containing protocol version,
@@ -147,8 +147,15 @@ struct scmi_protocol_instance {
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
+ * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
+ * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
+ * bus
+ * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
+ * @dbg: A pointer to debugfs related data (if any)
+ * @raw: An opaque reference handle used by SCMI Raw mode.
*/
struct scmi_info {
+ int id;
struct device *dev;
const struct scmi_desc *desc;
struct scmi_revision_info version;
@@ -166,32 +173,114 @@ struct scmi_info {
void *notify_priv;
struct list_head node;
int users;
+ struct notifier_block bus_nb;
+ struct notifier_block dev_req_nb;
+ /* Serialize device creation process for this instance */
+ struct mutex devreq_mtx;
+ struct scmi_debug_info *dbg;
+ void *raw;
};
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
+#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
-static const int scmi_linux_errmap[] = {
- /* better than switch case as long as return value is continuous */
- 0, /* SCMI_SUCCESS */
- -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
- -EINVAL, /* SCMI_ERR_PARAM */
- -EACCES, /* SCMI_ERR_ACCESS */
- -ENOENT, /* SCMI_ERR_ENTRY */
- -ERANGE, /* SCMI_ERR_RANGE */
- -EBUSY, /* SCMI_ERR_BUSY */
- -ECOMM, /* SCMI_ERR_COMMS */
- -EIO, /* SCMI_ERR_GENERIC */
- -EREMOTEIO, /* SCMI_ERR_HARDWARE */
- -EPROTO, /* SCMI_ERR_PROTOCOL */
-};
+static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto || !try_module_get(proto->owner)) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+ return proto;
+}
+
+static void scmi_protocol_put(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (proto)
+ module_put(proto->owner);
+}
+
+int scmi_protocol_register(const struct scmi_protocol *proto)
+{
+ int ret;
+
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ spin_lock(&protocol_lock);
+ ret = idr_alloc(&scmi_protocols, (void *)proto,
+ proto->id, proto->id + 1, GFP_ATOMIC);
+ spin_unlock(&protocol_lock);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
+
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
+{
+ spin_lock(&protocol_lock);
+ idr_remove(&scmi_protocols, proto->id);
+ spin_unlock(&protocol_lock);
-static inline int scmi_to_linux_errno(int errno)
+ pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+/**
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ * @name: The optional name of the device to be created: if not provided this
+ * call will lead to the creation of all the devices currently requested
+ * for the specified protocol.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info,
+ int prot_id, const char *name)
{
- int err_idx = -errno;
+ struct scmi_device *sdev;
- if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
- return scmi_linux_errmap[err_idx];
- return -EIO;
+ mutex_lock(&info->devreq_mtx);
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
+ if (name && !sdev)
+ dev_err(info->dev,
+ "failed to create device for protocol 0x%X (%s)\n",
+ prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
+}
+
+static void scmi_destroy_protocol_devices(struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ mutex_lock(&info->devreq_mtx);
+ scmi_device_destroy(info->dev, prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
}
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
@@ -311,8 +400,6 @@ static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
if (xfer_id != next_token)
atomic_add((int)(xfer_id - next_token), &transfer_last_id);
- /* Set in-flight */
- set_bit(xfer_id, minfo->xfer_alloc_table);
xfer->hdr.seq = (u16)xfer_id;
return 0;
@@ -331,32 +418,123 @@ static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
}
/**
+ * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper assumes that the xfer to be registered as in-flight
+ * had been built using an xfer sequence number which still corresponds to a
+ * free slot in the xfer_alloc_table.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ */
+static inline void
+scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ /* Set in-flight */
+ set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+ hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
+ xfer->pending = true;
+}
+
+/**
+ * scmi_xfer_inflight_register - Try to register an xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper does NOT assume anything about the sequence number
+ * that was baked into the provided xfer, so it first checks whether it can
+ * be mapped to a free slot and fails with an error if another xfer with the
+ * same sequence number is currently still registered as in-flight.
+ *
+ * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
+ * could not be mapped to a free slot in the xfer_alloc_table.
+ */
+static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
+ * scmi_xfer_raw_inflight_register - A helper to register the given xfer as
+ * in-flight on the TX channel, if possible.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: The xfer to register
+ *
+ * Return: 0 on Success, error otherwise
+ */
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
+}
+
+/**
+ * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
+ * as pending in-flight
+ *
+ * @xfer: The xfer to act upon
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Return: 0 on Success or error otherwise
+ */
+static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ /* Set a new monotonic token as the xfer sequence number */
+ ret = scmi_xfer_token_set(minfo, xfer);
+ if (!ret)
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
- * @set_pending: If true a monotonic token is picked and the xfer is added to
- * the pending hash table.
*
* Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
- * Picks an xfer from the free list @free_xfers (if any available) and, if
- * required, sets a monotonically increasing token and stores the inflight xfer
- * into the @pending_xfers hashtable for later retrieval.
+ * Picks an xfer from the free list @free_xfers (if any available) and performs
+ * a basic initialization.
+ *
+ * Note that, at this point, no sequence number has been assigned yet to the
+ * allocated xfer, nor is it registered as a pending transaction.
*
* The successfully initialized xfer is refcounted.
*
- * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
- * @free_xfers.
+ * Context: Holds @xfer_lock while manipulating @free_xfers.
*
- * Return: 0 if all went fine, else corresponding error.
+ * Return: An initialized xfer if all went fine, else an error pointer.
*/
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
- struct scmi_xfers_info *minfo,
- bool set_pending)
+ struct scmi_xfers_info *minfo)
{
- int ret;
unsigned long flags;
struct scmi_xfer *xfer;
@@ -376,31 +554,71 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
*/
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
- if (set_pending) {
- /* Pick and set monotonic token */
- ret = scmi_xfer_token_set(minfo, xfer);
- if (!ret) {
- hash_add(minfo->pending_xfers, &xfer->node,
- xfer->hdr.seq);
- xfer->pending = true;
- } else {
- dev_err(handle->dev,
- "Failed to get monotonic token %d\n", ret);
- hlist_add_head(&xfer->node, &minfo->free_xfers);
- xfer = ERR_PTR(ret);
- }
- }
-
- if (!IS_ERR(xfer)) {
- refcount_set(&xfer->users, 1);
- atomic_set(&xfer->busy, SCMI_XFER_FREE);
- }
+ refcount_set(&xfer->users, 1);
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return xfer;
}
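
As the updated kernel-doc notes, allocation is now decoupled from sequence-number assignment; the regular command path (see xfer_get_init() further down in this patch) pairs the two steps roughly like this:

    xfer = scmi_xfer_get(handle, minfo);
    if (!IS_ERR(xfer)) {
            ret = scmi_xfer_pending_set(xfer, minfo);
            if (ret)
                    __scmi_xfer_put(minfo, xfer);
    }
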
/**
+ * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
+ *
+ * @handle: Pointer to SCMI entity handle
+ *
+ * Note that xfer is taken from the TX channel structures.
+ *
+ * Return: A valid xfer on Success, or an error-pointer otherwise
+ */
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
+{
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer = scmi_xfer_get(handle, &info->tx_minfo);
+ if (!IS_ERR(xfer))
+ xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
+
+ return xfer;
+}
+
+/**
+ * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
+ * to use for a specific protocol_id Raw transaction.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol_id: Identifier of the protocol
+ *
+ * Note that in a regular SCMI stack, usually, a protocol has to be defined in
+ * the DT to have an associated channel and be usable; but in Raw mode any
+ * protocol in range is allowed, re-using the Base channel, so as to enable
+ * fuzzing on any protocol without the need of a fully compiled DT.
+ *
+ * Return: A reference to the channel to use, or an ERR_PTR
+ */
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_chan_info *cinfo;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ cinfo = idr_find(&info->tx_idr, protocol_id);
+ if (!cinfo) {
+ if (protocol_id == SCMI_PROTOCOL_BASE)
+ return ERR_PTR(-EINVAL);
+ /* Use Base channel for protocols not defined for DT */
+ cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+ if (!cinfo)
+ return ERR_PTR(-EINVAL);
+ dev_warn_once(handle->dev,
+ "Using Base channel for protocol 0x%X\n",
+ protocol_id);
+ }
+
+ return cinfo;
+}
+
+/**
* __scmi_xfer_put() - Release a message
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -429,6 +647,24 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
}
/**
+ * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: A reference to the xfer to put
+ *
+ * Note that, as with other xfer_put() handlers, the xfer is effectively
+ * released only if there are no more users on the system.
+ */
+void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
+ xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
+ return __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+/**
* scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
@@ -623,25 +859,6 @@ static inline void scmi_clear_channel(struct scmi_info *info,
info->desc->ops->clear_channel(cinfo);
}
-static inline bool is_polling_required(struct scmi_chan_info *cinfo,
- struct scmi_info *info)
-{
- return cinfo->no_completion_irq || info->desc->force_polling;
-}
-
-static inline bool is_transport_polling_capable(struct scmi_info *info)
-{
- return info->desc->ops->poll_done ||
- info->desc->sync_cmds_completed_on_ret;
-}
-
-static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
- struct scmi_info *info)
-{
- return is_polling_required(cinfo, info) &&
- is_transport_polling_capable(info);
-}
-
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
@@ -652,7 +869,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
ktime_t ts;
ts = ktime_get_boottime();
- xfer = scmi_xfer_get(cinfo->handle, minfo, false);
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
PTR_ERR(xfer));
@@ -667,9 +884,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
- xfer->hdr.seq, xfer->hdr.status,
- xfer->rx.buf, xfer->rx.len);
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "NOTI", xfer->hdr.seq,
+ xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -678,6 +895,12 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
xfer->hdr.protocol_id, xfer->hdr.seq,
MSG_TYPE_NOTIFICATION);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+ scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
+ cinfo->id);
+ }
+
__scmi_xfer_put(minfo, xfer);
scmi_clear_channel(info, cinfo);
@@ -691,6 +914,9 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
+
if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
scmi_clear_channel(info, cinfo);
return;
@@ -705,9 +931,11 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id,
xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
- "DLYD" : "RESP",
+ (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
+ (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
@@ -722,6 +950,18 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
complete(&xfer->done);
}
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ /*
+ * When in polling mode avoid queueing the Raw xfer on the IRQ
+ * RX path since it will already be queued at the end of the TX
+ * poll loop.
+ */
+ if (!xfer->hdr.poll_completion)
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
+
scmi_xfer_command_release(info, xfer);
}
@@ -785,36 +1025,18 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
ktime_after(ktime_get(), stop);
}
-/**
- * scmi_wait_for_message_response - An helper to group all the possible ways of
- * waiting for a synchronous message response.
- *
- * @cinfo: SCMI channel info
- * @xfer: Reference to the transfer being waited for.
- *
- * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
- * configuration flags like xfer->hdr.poll_completion.
- *
- * Return: 0 on Success, error otherwise.
- */
-static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
- struct scmi_xfer *xfer)
+static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, unsigned int timeout_ms)
{
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct device *dev = info->dev;
- int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
-
- trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq,
- timeout_ms,
- xfer->hdr.poll_completion);
+ int ret = 0;
if (xfer->hdr.poll_completion) {
/*
* Real polling is needed only if transport has NOT declared
* itself to support synchronous commands replies.
*/
- if (!info->desc->sync_cmds_completed_on_ret) {
+ if (!desc->sync_cmds_completed_on_ret) {
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
@@ -833,6 +1055,8 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
if (!ret) {
unsigned long flags;
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
/*
* Do not fetch_response if an out-of-order delayed
@@ -840,16 +1064,27 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
*/
spin_lock_irqsave(&xfer->lock, flags);
if (xfer->state == SCMI_XFER_SENT_OK) {
- info->desc->ops->fetch_response(cinfo, xfer);
+ desc->ops->fetch_response(cinfo, xfer);
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
/* Trace polled replies. */
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
- "RESP",
+ trace_scmi_msg_dump(info->id, cinfo->id,
+ xfer->hdr.protocol_id, xfer->hdr.id,
+ !SCMI_XFER_IS_RAW(xfer) ?
+ "RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
+
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
}
} else {
/* And we wait for the response. */
@@ -865,6 +1100,59 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
}
/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ info->desc->max_rx_timeout_ms,
+ xfer->hdr.poll_completion);
+
+ return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
+ info->desc->max_rx_timeout_ms);
+}
+
+/**
+ * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
+ * reply to a raw xfer request on a specific channel for the required timeout.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ * @timeout_ms: The maximum timeout in milliseconds
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms)
+{
+ int ret;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
+ if (ret)
+ dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+
+ return ret;
+}
+
+/**
* do_xfer() - Do one transfer
*
* @ph: Pointer to SCMI protocol handle
@@ -884,7 +1172,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_chan_info *cinfo;
/* Check for polling request on custom command xfers at first */
- if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
+ if (xfer->hdr.poll_completion &&
+ !is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
return -EINVAL;
@@ -895,7 +1184,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return -EINVAL;
/* True ONLY if also supported by transport. */
- if (is_polling_enabled(cinfo, info))
+ if (is_polling_enabled(cinfo, info->desc))
xfer->hdr.poll_completion = true;
/*
@@ -928,9 +1217,9 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
- trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
- xfer->hdr.seq, xfer->hdr.status,
- xfer->tx.buf, xfer->tx.len);
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "CMND", xfer->hdr.seq,
+ xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
@@ -954,8 +1243,6 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
xfer->rx.len = info->desc->max_msg_size;
}
-#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
-
/**
* do_xfer_with_response() - Do one transfer and wait until the delayed
* response is received
@@ -1043,13 +1330,22 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(pi->handle, minfo, true);
+ xfer = scmi_xfer_get(pi->handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
return ret;
}
+ /* Pick a sequence number and register this xfer as in-flight */
+ ret = scmi_xfer_pending_set(xfer, minfo);
+ if (ret) {
+ dev_err(pi->handle->dev,
+ "Failed to get monotonic token %d\n", ret);
+ __scmi_xfer_put(minfo, xfer);
+ return ret;
+ }
+
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.type = MSG_TYPE_COMMAND;
@@ -1822,20 +2118,14 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
bool ret;
struct scmi_info *info = handle_to_scmi_info(handle);
- ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+ ret = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
if (ret && atomic_threshold)
*atomic_threshold = info->atomic_threshold;
return ret;
}
-static inline
-struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
-{
- info->users++;
- return &info->handle;
-}
-
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
@@ -1847,7 +2137,7 @@ struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
*
* Return: pointer to handle if successful, NULL on error
*/
-struct scmi_handle *scmi_handle_get(struct device *dev)
+static struct scmi_handle *scmi_handle_get(struct device *dev)
{
struct list_head *p;
struct scmi_info *info;
@@ -1857,7 +2147,8 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
list_for_each(p, &scmi_list) {
info = list_entry(p, struct scmi_info, node);
if (dev->parent == info->dev) {
- handle = scmi_handle_get_from_info_unlocked(info);
+ info->users++;
+ handle = &info->handle;
break;
}
}
@@ -1878,7 +2169,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
* Return: 0 is successfully released
* if null was passed, it returns -EINVAL;
*/
-int scmi_handle_put(const struct scmi_handle *handle)
+static int scmi_handle_put(const struct scmi_handle *handle)
{
struct scmi_info *info;
@@ -1894,6 +2185,23 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
+static void scmi_device_link_add(struct device *consumer,
+ struct device *supplier)
+{
+ struct device_link *link;
+
+ link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ WARN_ON(!link);
+}
+
+static void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+ scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+ if (scmi_dev->handle)
+ scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
+}
+
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
struct scmi_xfers_info *info)
{
@@ -1987,23 +2295,20 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return ret;
}
-static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
int prot_id, bool tx)
{
int ret, idx;
+ char name[32];
struct scmi_chan_info *cinfo;
struct idr *idr;
+ struct scmi_device *tdev = NULL;
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
- /* check if already allocated, used for multiple device per protocol */
- cinfo = idr_find(idr, prot_id);
- if (cinfo)
- return 0;
-
- if (!info->desc->ops->chan_available(dev, idx)) {
+ if (!info->desc->ops->chan_available(of_node, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
@@ -2014,27 +2319,52 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
if (!cinfo)
return -ENOMEM;
- cinfo->dev = dev;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+ /* Create a unique name for this transport device */
+ snprintf(name, 32, "__scmi_transport_device_%s_%02X",
+ idx ? "rx" : "tx", prot_id);
+ /* Create a uniquely named, dedicated transport device for this chan */
+ tdev = scmi_device_create(of_node, info->dev, prot_id, name);
+ if (!tdev) {
+ dev_err(info->dev,
+ "failed to create transport device (%s)\n", name);
+ devm_kfree(info->dev, cinfo);
+ return -EINVAL;
+ }
+ of_node_get(of_node);
+
+ cinfo->id = prot_id;
+ cinfo->dev = &tdev->dev;
ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
- if (ret)
+ if (ret) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
return ret;
+ }
- if (tx && is_polling_required(cinfo, info)) {
- if (is_transport_polling_capable(info))
- dev_info(dev,
+ if (tx && is_polling_required(cinfo, info->desc)) {
+ if (is_transport_polling_capable(info->desc))
+ dev_info(&tdev->dev,
"Enabled polling mode TX channel - prot_id:%d\n",
prot_id);
else
- dev_warn(dev,
+ dev_warn(&tdev->dev,
"Polling mode NOT supported by transport.\n");
}
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
- dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+ dev_err(info->dev,
+ "unable to allocate SCMI idr slot err %d\n", ret);
+ /* Destroy channel and device only if created by this call. */
+ if (tdev) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
+ }
return ret;
}
@@ -2043,13 +2373,14 @@ idr_alloc:
}
static inline int
-scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
+ int prot_id)
{
- int ret = scmi_chan_setup(info, dev, prot_id, true);
+ int ret = scmi_chan_setup(info, of_node, prot_id, true);
if (!ret) {
/* Rx is optional, report only memory errors */
- ret = scmi_chan_setup(info, dev, prot_id, false);
+ ret = scmi_chan_setup(info, of_node, prot_id, false);
if (ret && ret != -ENOMEM)
ret = 0;
}
@@ -2058,306 +2389,264 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
}
/**
- * scmi_get_protocol_device - Helper to get/create an SCMI device.
- *
- * @np: A device node representing a valid active protocols for the referred
- * SCMI instance.
- * @info: The referred SCMI instance for which we are getting/creating this
- * device.
- * @prot_id: The protocol ID.
- * @name: The device name.
- *
- * Referring to the specific SCMI instance identified by @info, this helper
- * takes care to return a properly initialized device matching the requested
- * @proto_id and @name: if device was still not existent it is created as a
- * child of the specified SCMI instance @info and its transport properly
- * initialized as usual.
- *
- * Return: A properly initialized scmi device, NULL otherwise.
+ * scmi_channels_setup - Helper to initialize all required channels
+ *
+ * @info: The SCMI instance descriptor.
+ *
+ * Initialize all the channels described in the DT against the underlying
+ * configured transport using custom dedicated devices instead of borrowing
+ * devices from the SCMI drivers; this way channels are initialized upfront
+ * during core SCMI stack probing and are no longer coupled with the SCMI
+ * devices used by the SCMI drivers.
+ *
+ * Note that, even though a pair of TX/RX channels is associated with each
+ * protocol defined in the DT, a distinct freshly initialized channel is
+ * created only if the DT node for the protocol at hand describes a dedicated
+ * channel: in all the other cases the common BASE protocol channel is reused.
+ *
+ * Return: 0 on Success
*/
-static inline struct scmi_device *
-scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static int scmi_channels_setup(struct scmi_info *info)
{
- struct scmi_device *sdev;
+ int ret;
+ struct device_node *child, *top_np = info->dev->of_node;
- /* Already created for this parent SCMI instance ? */
- sdev = scmi_child_dev_find(info->dev, prot_id, name);
- if (sdev)
- return sdev;
+ /* Initialize a common generic channel at first */
+ ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
+ if (ret)
+ return ret;
- mutex_lock(&scmi_syspower_mtx);
- if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
- dev_warn(info->dev,
- "SCMI SystemPower protocol device must be unique !\n");
- mutex_unlock(&scmi_syspower_mtx);
+ for_each_available_child_of_node(top_np, child) {
+ u32 prot_id;
- return NULL;
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
+
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(info->dev,
+ "Out of range protocol %d\n", prot_id);
+
+ ret = scmi_txrx_setup(info, child, prot_id);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
}
- pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+ return 0;
+}
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (!sdev) {
- dev_err(info->dev, "failed to create %d protocol device\n",
- prot_id);
- mutex_unlock(&scmi_syspower_mtx);
+static int scmi_chan_destroy(int id, void *p, void *idr)
+{
+ struct scmi_chan_info *cinfo = p;
- return NULL;
+ if (cinfo->dev) {
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
+
+ of_node_put(cinfo->dev->of_node);
+ scmi_device_destroy(info->dev, id, sdev->name);
+ cinfo->dev = NULL;
}
- if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
- dev_err(&sdev->dev, "failed to setup transport\n");
- scmi_device_destroy(sdev);
- mutex_unlock(&scmi_syspower_mtx);
+ idr_remove(idr, id);
- return NULL;
- }
+ return 0;
+}
- if (prot_id == SCMI_PROTOCOL_SYSTEM)
- scmi_syspower_registered = true;
+static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
+{
+ /* At first free all channels at the transport layer ... */
+ idr_for_each(idr, info->desc->ops->chan_free, idr);
- mutex_unlock(&scmi_syspower_mtx);
+ /* ...then destroy all underlying devices */
+ idr_for_each(idr, scmi_chan_destroy, idr);
- return sdev;
+ idr_destroy(idr);
}
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static void scmi_cleanup_txrx_channels(struct scmi_info *info)
{
- struct scmi_device *sdev;
-
- sdev = scmi_get_protocol_device(np, info, prot_id, name);
- if (!sdev)
- return;
+ scmi_cleanup_channels(info, &info->tx_idr);
- /* setup handle now as the transport is ready */
- scmi_set_handle(sdev);
+ scmi_cleanup_channels(info, &info->rx_idr);
}
-/**
- * scmi_create_protocol_devices - Create devices for all pending requests for
- * this SCMI instance.
- *
- * @np: The device node describing the protocol
- * @info: The SCMI instance descriptor
- * @prot_id: The protocol ID
- *
- * All devices previously requested for this instance (if any) are found and
- * created by scanning the proper @&scmi_requested_devices entry.
- */
-static void scmi_create_protocol_devices(struct device_node *np,
- struct scmi_info *info, int prot_id)
+static int scmi_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct list_head *phead;
+ struct scmi_info *info = bus_nb_to_scmi_info(nb);
+ struct scmi_device *sdev = to_scmi_dev(data);
- mutex_lock(&scmi_requested_devices_mtx);
- phead = idr_find(&scmi_requested_devices, prot_id);
- if (phead) {
- struct scmi_requested_dev *rdev;
+ /* Skip transport devices and devices of different SCMI instances */
+ if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
+ sdev->dev.parent != info->dev)
+ return NOTIFY_DONE;
- list_for_each_entry(rdev, phead, node)
- scmi_create_protocol_device(np, info, prot_id,
- rdev->id_table->name);
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ /* setup handle now as the transport is ready */
+ scmi_set_handle(sdev);
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ scmi_handle_put(sdev->handle);
+ sdev->handle = NULL;
+ break;
+ default:
+ return NOTIFY_DONE;
}
- mutex_unlock(&scmi_requested_devices_mtx);
+
+ dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
+ sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
+ "about to be BOUND." : "UNBOUND.");
+
+ return NOTIFY_OK;
}
-/**
- * scmi_protocol_device_request - Helper to request a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be created.
- *
- * This helper let an SCMI driver request specific devices identified by the
- * @id_table to be created for each active SCMI instance.
- *
- * The requested device name MUST NOT be already existent for any protocol;
- * at first the freshly requested @id_table is annotated in the IDR table
- * @scmi_requested_devices, then a matching device is created for each already
- * active SCMI instance. (if any)
- *
- * This way the requested device is created straight-away for all the already
- * initialized(probed) SCMI instances (handles) and it remains also annotated
- * as pending creation if the requesting SCMI driver was loaded before some
- * SCMI instance and related transports were available: when such late instance
- * is probed, its probe will take care to scan the list of pending requested
- * devices and create those on its own (see @scmi_create_protocol_devices and
- * its enclosing loop)
- *
- * Return: 0 on Success
- */
-int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+static int scmi_device_request_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- int ret = 0;
- unsigned int id = 0;
- struct list_head *head, *phead = NULL;
- struct scmi_requested_dev *rdev;
- struct scmi_info *info;
+ struct device_node *np;
+ struct scmi_device_id *id_table = data;
+ struct scmi_info *info = req_nb_to_scmi_info(nb);
- pr_debug("Requesting SCMI device (%s) for protocol %x\n",
- id_table->name, id_table->protocol_id);
+ np = idr_find(&info->active_protocols, id_table->protocol_id);
+ if (!np)
+ return NOTIFY_DONE;
- /*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
- */
- mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
- list_for_each_entry(rdev, head, node) {
- if (!strcmp(rdev->id_table->name, id_table->name)) {
- pr_err("Ignoring duplicate request [%d] %s\n",
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- ret = -EINVAL;
- goto out;
- }
- }
- }
+ dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
+ action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
+ id_table->name, id_table->protocol_id);
- /*
- * No duplicate found for requested id_table, so let's create a new
- * requested device entry for this new valid request.
- */
- rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- ret = -ENOMEM;
- goto out;
+ switch (action) {
+ case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
+ scmi_create_protocol_devices(np, info, id_table->protocol_id,
+ id_table->name);
+ break;
+ case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
+ scmi_destroy_protocol_devices(info, id_table->protocol_id,
+ id_table->name);
+ break;
+ default:
+ return NOTIFY_DONE;
}
- rdev->id_table = id_table;
- /*
- * Append the new requested device table descriptor to the head of the
- * related protocol list, eventually creating such head if not already
- * there.
- */
- if (!phead) {
- phead = kzalloc(sizeof(*phead), GFP_KERNEL);
- if (!phead) {
- kfree(rdev);
- ret = -ENOMEM;
- goto out;
- }
- INIT_LIST_HEAD(phead);
-
- ret = idr_alloc(&scmi_requested_devices, (void *)phead,
- id_table->protocol_id,
- id_table->protocol_id + 1, GFP_KERNEL);
- if (ret != id_table->protocol_id) {
- pr_err("Failed to save SCMI device - ret:%d\n", ret);
- kfree(rdev);
- kfree(phead);
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- }
- list_add(&rdev->node, phead);
+ return NOTIFY_OK;
+}
- /*
- * Now effectively create and initialize the requested device for every
- * already initialized SCMI instance which has registered the requested
- * protocol as a valid active one: i.e. defined in DT and supported by
- * current platform FW.
- */
- mutex_lock(&scmi_list_mutex);
- list_for_each_entry(info, &scmi_list, node) {
- struct device_node *child;
-
- child = idr_find(&info->active_protocols,
- id_table->protocol_id);
- if (child) {
- struct scmi_device *sdev;
-
- sdev = scmi_get_protocol_device(child, info,
- id_table->protocol_id,
- id_table->name);
- if (sdev) {
- /* Set handle if not already set: device existed */
- if (!sdev->handle)
- sdev->handle =
- scmi_handle_get_from_info_unlocked(info);
- /* Relink consumer and suppliers */
- if (sdev->handle)
- scmi_device_link_add(&sdev->dev,
- sdev->handle->dev);
- }
- } else {
- dev_err(info->dev,
- "Failed. SCMI protocol %d not active.\n",
- id_table->protocol_id);
- }
- }
- mutex_unlock(&scmi_list_mutex);
+static void scmi_debugfs_common_cleanup(void *d)
+{
+ struct scmi_debug_info *dbg = d;
-out:
- mutex_unlock(&scmi_requested_devices_mtx);
+ if (!dbg)
+ return;
- return ret;
+ debugfs_remove_recursive(dbg->top_dentry);
+ kfree(dbg->name);
+ kfree(dbg->type);
}
-/**
- * scmi_protocol_device_unrequest - Helper to unrequest a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be unrequested.
- *
- * An helper to let an SCMI driver release its request about devices; note that
- * devices are created and initialized once the first SCMI driver request them
- * but they destroyed only on SCMI core unloading/unbinding.
- *
- * The current SCMI transport layer uses such devices as internal references and
- * as such they could be shared as same transport between multiple drivers so
- * that cannot be safely destroyed till the whole SCMI stack is removed.
- * (unless adding further burden of refcounting.)
- */
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
{
- struct list_head *phead;
+ char top_dir[16];
+ struct dentry *trans, *top_dentry;
+ struct scmi_debug_info *dbg;
+ const char *c_ptr = NULL;
- pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
- id_table->name, id_table->protocol_id);
+ dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return NULL;
- mutex_lock(&scmi_requested_devices_mtx);
- phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
- if (phead) {
- struct scmi_requested_dev *victim, *tmp;
+ dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
+ if (!dbg->name) {
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
- list_for_each_entry_safe(victim, tmp, phead, node) {
- if (!strcmp(victim->id_table->name, id_table->name)) {
- list_del(&victim->node);
- kfree(victim);
- break;
- }
- }
+ of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
+ dbg->type = kstrdup(c_ptr, GFP_KERNEL);
+ if (!dbg->type) {
+ kfree(dbg->name);
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
- if (list_empty(phead)) {
- idr_remove(&scmi_requested_devices,
- id_table->protocol_id);
- kfree(phead);
- }
+ snprintf(top_dir, 16, "%d", info->id);
+ top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
+ trans = debugfs_create_dir("transport", top_dentry);
+
+ dbg->is_atomic = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
+
+ debugfs_create_str("instance_name", 0400, top_dentry,
+ (char **)&dbg->name);
+
+ debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
+ &info->atomic_threshold);
+
+ debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
+
+ debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
+
+ debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
+ (u32 *)&info->desc->max_rx_timeout_ms);
+
+ debugfs_create_u32("max_msg_size", 0400, trans,
+ (u32 *)&info->desc->max_msg_size);
+
+ debugfs_create_u32("tx_max_msg", 0400, trans,
+ (u32 *)&info->tx_minfo.max_msg);
+
+ debugfs_create_u32("rx_max_msg", 0400, trans,
+ (u32 *)&info->rx_minfo.max_msg);
+
+ dbg->top_dentry = top_dentry;
+
+ if (devm_add_action_or_reset(info->dev,
+ scmi_debugfs_common_cleanup, dbg)) {
+ scmi_debugfs_common_cleanup(dbg);
+ return NULL;
}
- mutex_unlock(&scmi_requested_devices_mtx);
+
+ return dbg;
}
-static int scmi_cleanup_txrx_channels(struct scmi_info *info)
+static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
{
- int ret;
- struct idr *idr = &info->tx_idr;
+ int id, num_chans = 0, ret = 0;
+ struct scmi_chan_info *cinfo;
+ u8 channels[SCMI_MAX_CHANNELS] = {};
+ DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
+
+ if (!info->dbg)
+ return -EINVAL;
+
+ /* Enumerate all channels to collect their ids */
+ idr_for_each_entry(&info->tx_idr, cinfo, id) {
+ /*
+ * Cannot happen, but be defensive.
+ * Zero as num_chans is ok, warn and carry on.
+ */
+ if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
+ dev_warn(info->dev,
+ "SCMI RAW - Error enumerating channels\n");
+ break;
+ }
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->tx_idr);
+ if (!test_bit(cinfo->id, protos)) {
+ channels[num_chans++] = cinfo->id;
+ set_bit(cinfo->id, protos);
+ }
+ }
- idr = &info->rx_idr;
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->rx_idr);
+ info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
+ info->id, channels, num_chans,
+ info->desc, info->tx_minfo.max_msg);
+ if (IS_ERR(info->raw)) {
+ dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
+ ret = PTR_ERR(info->raw);
+ info->raw = NULL;
+ }
return ret;
}
@@ -2379,12 +2668,19 @@ static int scmi_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
+ info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
+ if (info->id < 0)
+ return info->id;
+
info->dev = dev;
info->desc = desc;
+ info->bus_nb.notifier_call = scmi_bus_notifier;
+ info->dev_req_nb.notifier_call = scmi_device_request_notifier;
INIT_LIST_HEAD(&info->node);
idr_init(&info->protocols);
mutex_init(&info->protocols_mtx);
idr_init(&info->active_protocols);
+ mutex_init(&info->devreq_mtx);
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
@@ -2408,21 +2704,55 @@ static int scmi_probe(struct platform_device *pdev)
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
if (ret)
- return ret;
+ goto clear_ida;
}
- ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ /* Setup all channels described in the DT at first */
+ ret = scmi_channels_setup(info);
if (ret)
- return ret;
+ goto clear_ida;
- ret = scmi_xfer_info_init(info);
+ ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
if (ret)
goto clear_txrx_setup;
+ ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ if (ret)
+ goto clear_bus_notifier;
+
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ if (scmi_top_dentry) {
+ info->dbg = scmi_debugfs_common_setup(info);
+ if (!info->dbg)
+ dev_warn(dev, "Failed to setup SCMI debugfs.\n");
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ bool coex =
+ IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
+
+ ret = scmi_debugfs_raw_mode_setup(info);
+ if (!coex) {
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ /* Bail out anyway when coex is disabled */
+ return ret;
+ }
+
+ /* Coex enabled, carry on in any case. */
+ dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
+ }
+ }
+
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
- if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
+ if (info->desc->atomic_enabled &&
+ !is_transport_polling_capable(info->desc))
dev_err(dev,
"Transport is not polling capable. Atomic mode not supported.\n");
@@ -2469,29 +2799,36 @@ static int scmi_probe(struct platform_device *pdev)
}
of_node_get(child);
- scmi_create_protocol_devices(child, info, prot_id);
+ scmi_create_protocol_devices(child, info, prot_id, NULL);
}
return 0;
notification_exit:
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
scmi_notification_exit(&info->handle);
+clear_dev_req_notifier:
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+clear_bus_notifier:
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
clear_txrx_setup:
scmi_cleanup_txrx_channels(info);
+clear_ida:
+ ida_free(&scmi_id, info->id);
return ret;
}
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
-{
- idr_remove(idr, id);
-}
-
static int scmi_remove(struct platform_device *pdev)
{
- int ret, id;
+ int id;
struct scmi_info *info = platform_get_drvdata(pdev);
struct device_node *child;
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
+
mutex_lock(&scmi_list_mutex);
if (info->users)
dev_warn(&pdev->dev,
@@ -2509,10 +2846,14 @@ static int scmi_remove(struct platform_device *pdev)
of_node_put(child);
idr_destroy(&info->active_protocols);
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
+
/* Safe to free channels since no more users */
- ret = scmi_cleanup_txrx_channels(info);
- if (ret)
- dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n");
+ scmi_cleanup_txrx_channels(info);
+
+ ida_free(&scmi_id, info->id);
return 0;
}
@@ -2641,6 +2982,19 @@ static void __exit scmi_transports_exit(void)
__scmi_transports_setup(false);
}
+static struct dentry *scmi_debugfs_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("scmi", NULL);
+ if (IS_ERR(d)) {
+ pr_err("Could NOT create SCMI top dentry.\n");
+ return NULL;
+ }
+
+ return d;
+}
+
static int __init scmi_driver_init(void)
{
int ret;
@@ -2649,13 +3003,14 @@ static int __init scmi_driver_init(void)
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
- scmi_bus_init();
-
/* Initialize any compiled-in transport which provided an init/exit */
ret = scmi_transports_init();
if (ret)
return ret;
+ if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
+ scmi_top_dentry = scmi_debugfs_init();
+
scmi_base_register();
scmi_clock_register();
@@ -2669,7 +3024,7 @@ static int __init scmi_driver_init(void)
return platform_driver_register(&scmi_driver);
}
-subsys_initcall(scmi_driver_init);
+module_init(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
@@ -2684,11 +3039,11 @@ static void __exit scmi_driver_exit(void)
scmi_system_unregister();
scmi_powercap_unregister();
- scmi_bus_exit();
-
scmi_transports_exit();
platform_driver_unregister(&scmi_driver);
+
+ debugfs_remove_recursive(scmi_top_dentry);
}
module_exit(scmi_driver_exit);
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 1e40cb035044..0d9c9538b7f4 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -46,9 +46,9 @@ static void rx_callback(struct mbox_client *cl, void *m)
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
-static bool mailbox_chan_available(struct device *dev, int idx)
+static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
- return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+ return !of_parse_phandle_with_args(of_node, "mboxes",
"#mbox-cells", idx, NULL);
}
@@ -120,8 +120,6 @@ static int mailbox_chan_free(int id, void *p, void *data)
smbox->cinfo = NULL;
}
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 2a7aeab40e54..929720387102 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -328,11 +328,11 @@ static int scmi_optee_link_supplier(struct device *dev)
return 0;
}
-static bool scmi_optee_chan_available(struct device *dev, int idx)
+static bool scmi_optee_chan_available(struct device_node *of_node, int idx)
{
u32 channel_id;
- return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id",
+ return !of_property_read_u32_index(of_node, "linaro,optee-channel-id",
idx, &channel_id);
}
@@ -481,8 +481,6 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
cinfo->transport_info = NULL;
channel->cinfo = NULL;
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 2f3bf691db7c..78e1a01eb656 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -115,6 +115,7 @@ struct scmi_msg_hdr {
* - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
* - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
* (Missing synchronous response is assumed OK and ignored)
+ * @flags: Optional flags associated to this xfer.
* @lock: A spinlock to protect state and busy fields.
* @priv: A pointer for transport private usage.
*/
@@ -135,6 +136,12 @@ struct scmi_xfer {
#define SCMI_XFER_RESP_OK 1
#define SCMI_XFER_DRESP_OK 2
int state;
+#define SCMI_XFER_FLAG_IS_RAW BIT(0)
+#define SCMI_XFER_IS_RAW(x) ((x)->flags & SCMI_XFER_FLAG_IS_RAW)
+#define SCMI_XFER_FLAG_CHAN_SET BIT(1)
+#define SCMI_XFER_IS_CHAN_SET(x) \
+ ((x)->flags & SCMI_XFER_FLAG_CHAN_SET)
+ int flags;
/* A lock to protect state and busy fields */
spinlock_t lock;
void *priv;
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
new file mode 100644
index 000000000000..d40df099fd51
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -0,0 +1,1443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Raw mode support
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * When enabled, the SCMI Raw mode support exposes a userspace API which allows
+ * a user application to send and receive SCMI commands, replies and
+ * notifications through injection and snooping of bare SCMI messages in binary
+ * little-endian format.
+ *
+ * Such injected SCMI transactions will then be routed through the SCMI core
+ * stack towards the SCMI backend server using whatever SCMI transport is
+ * currently configured on the system under test.
+ *
+ * It is meant to help in running any sort of SCMI backend server testing, no
+ * matter where the server is placed, as long as it is normally reachable via
+ * the transport configured on the system.
+ *
+ * It is activated by a Kernel configuration option since it is NOT meant to
+ * be used in production but only during development and in CI deployments.
+ *
+ * In order to avoid possible interference between the SCMI Raw transactions
+ * originated from a test-suite and the normal operations of the SCMI drivers,
+ * when Raw mode is enabled, by default, all the regular SCMI drivers are
+ * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
+ * latter case the regular SCMI stack drivers will be loaded as usual and it is
+ * up to the user of this interface to take care of manually inhibiting the
+ * regular SCMI drivers in order to avoid interference during the test runs.
+ *
+ * The exposed API is as follows.
+ *
+ * All SCMI Raw entries are rooted under a common /raw debugfs top directory
+ * which in turn is rooted under the corresponding underlying SCMI instance.
+ *
+ * /sys/kernel/debug/scmi/
+ * `-- 0
+ * |-- atomic_threshold_us
+ * |-- instance_name
+ * |-- raw
+ * | |-- channels
+ * | | |-- 0x10
+ * | | | |-- message
+ * | | | `-- message_async
+ * | | `-- 0x13
+ * | | |-- message
+ * | | `-- message_async
+ * | |-- errors
+ * | |-- message
+ * | |-- message_async
+ * | |-- notification
+ * | `-- reset
+ * `-- transport
+ * |-- is_atomic
+ * |-- max_msg_size
+ * |-- max_rx_timeout_ms
+ * |-- rx_max_msg
+ * |-- tx_max_msg
+ * `-- type
+ *
+ * where:
+ *
+ * - errors: used to read back timed-out and unexpected replies
+ * - message*: used to send sync/async commands and read back immediate and
+ * delayed responses (if any)
+ * - notification: used to read any notification being emitted by the system
+ * (if previously enabled by the user app)
+ * - reset: used to flush the queues of messages (of any kind) still pending
+ * to be read; this is useful at test-suite start/stop to get
+ * rid of any unread messages from the previous run.
+ *
+ * with the per-channel entries rooted at /channels being present only on a
+ * system where multiple transport channels have been configured.
+ *
+ * Such per-channel entries can be used to explicitly choose a specific channel
+ * for SCMI bare message injection, in contrast with the general entries above
+ * where, instead, the selection of the proper channel to use is automatically
+ * performed based on the protocol embedded in the injected message and on how
+ * the transport is configured on the system.
+ *
+ * Note that other common general entries are available under transport/ to let
+ * user applications properly set their expectations in terms of timeouts and
+ * message characteristics.
+ *
+ * Each write to the message* entries causes one command request to be built
+ * and sent, while the replies or delayed responses are read back from those
+ * same entries one message at a time (receiving an EOF at each message
+ * boundary).
+ *
+ * The user application running the test is in charge of handling timeouts
+ * on replies and properly choosing SCMI sequence numbers for the outgoing
+ * requests (using the same sequence number is supported but discouraged).
+ *
+ * Injection of multiple in-flight requests is supported as long as the user
+ * application uses properly distinct sequence numbers for concurrent requests
+ * and takes care to properly manage all the related issues about concurrency
+ * and command/reply pairing. Keep in mind that, anyway, the real level of
+ * parallelism attainable in such a scenario depends on the characteristics
+ * of the underlying transport being used.
+ *
+ * Since the SCMI core regular stack is partially used to deliver and collect
+ * the messages, late replies arriving after timeouts and any other sort of
+ * unexpected message can be identified by the SCMI core as usual and they will
+ * be reported as messages under "errors" for later analysis.
+ */
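[Editor's note: the following is an illustrative sketch, not part of the patch. It shows how a userspace test client could exercise the Raw mode interface described in the comment above, using the /sys/kernel/debug/scmi/0/raw/message entry from the tree shown there. It assumes the standard SCMI header bit layout (message id [7:0], type [9:8], protocol id [17:10], token [27:18]) and picks the Base protocol (0x10) PROTOCOL_VERSION command (message id 0x0) purely as an example; instance number, protocol and message IDs would need adjusting for the system under test.]

/* Minimal sketch of a Raw mode userspace client (assumptions noted above). */
#include <endian.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Pack a bare SCMI command header: msg_id[7:0], type[9:8]=0, prot[17:10], token[27:18] */
static uint32_t scmi_hdr(uint8_t msg_id, uint8_t prot_id, uint16_t token)
{
	return (uint32_t)msg_id | ((uint32_t)prot_id << 10) |
	       ((uint32_t)(token & 0x3ff) << 18);
}

int main(void)
{
	/* Instance 0 path taken from the debugfs tree documented above */
	const char *path = "/sys/kernel/debug/scmi/0/raw/message";
	uint32_t hdr = htole32(scmi_hdr(0x0 /* PROTOCOL_VERSION */,
					0x10 /* Base protocol */, 1));
	uint8_t rx[128];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return 1;

	/* One write == one command: LE header only, no payload here */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	/* Blocking read returns the reply as one message, then EOF */
	n = read(fd, rx, sizeof(rx));
	if (n >= 8) {
		uint32_t status;

		/* Assumes the usual reply layout: header, then 32-bit status */
		memcpy(&status, rx + 4, sizeof(status));
		printf("reply len=%zd status=%d\n", n, (int32_t)le32toh(status));
	}

	close(fd);
	return 0;
}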
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "common.h"
+
+#include "raw_mode.h"
+
+#include <trace/events/scmi.h>
+
+#define SCMI_XFER_RAW_MAX_RETRIES 10
+
+/**
+ * struct scmi_raw_queue - Generic Raw queue descriptor
+ *
+ * @free_bufs: A freelists listhead used to keep unused raw buffers
+ * @free_bufs_lock: Spinlock used to protect access to @free_bufs
+ * @msg_q: A listhead to a queue of snooped messages waiting to be read out
+ * @msg_q_lock: Spinlock used to protect access to @msg_q
+ * @wq: A waitqueue used to wait and poll on related @msg_q
+ */
+struct scmi_raw_queue {
+ struct list_head free_bufs;
+ /* Protect free_bufs[] lists */
+ spinlock_t free_bufs_lock;
+ struct list_head msg_q;
+ /* Protect msg_q[] lists */
+ spinlock_t msg_q_lock;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
+ *
+ * @id: Sequential Raw instance ID.
+ * @handle: Pointer to SCMI entity handle to use
+ * @desc: Pointer to the transport descriptor to use
+ * @tx_max_msg: Maximum number of concurrent TX in-flight messages
+ * @q: An array of Raw queue descriptors
+ * @chans_q: An XArray mapping optional additional per-channel queues
+ * @free_waiters: Head of freelist for unused waiters
+ * @free_mtx: A mutex to protect the waiters freelist
+ * @active_waiters: Head of list for currently active and used waiters
+ * @active_mtx: A mutex to protect the active waiters list
+ * @waiters_work: A work descriptor to be used with the workqueue machinery
+ * @wait_wq: A workqueue reference to the created workqueue
+ * @dentry: Top debugfs root dentry for SCMI Raw
+ * @gid: A group ID used for devres accounting
+ *
+ * Note that this descriptor is passed back to the core after SCMI Raw is
+ * initialized as an opaque handle to be used by subsequent SCMI Raw call hooks.
+ *
+ */
+struct scmi_raw_mode_info {
+ unsigned int id;
+ const struct scmi_handle *handle;
+ const struct scmi_desc *desc;
+ int tx_max_msg;
+ struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
+ struct xarray chans_q;
+ struct list_head free_waiters;
+ /* Protect free_waiters list */
+ struct mutex free_mtx;
+ struct list_head active_waiters;
+ /* Protect active_waiters list */
+ struct mutex active_mtx;
+ struct work_struct waiters_work;
+ struct workqueue_struct *wait_wq;
+ struct dentry *dentry;
+ void *gid;
+};
+
+/**
+ * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
+ *
+ * @start_jiffies: The timestamp in jiffies of when this structure was queued.
+ * @cinfo: A reference to the channel to use for this transaction
+ * @xfer: A reference to the xfer to be waited for
+ * @async_response: A completion to be, optionally, used for async waits: it
+ * will be set up by @scmi_do_xfer_raw_start, if needed, to be
+ * pointed at by xfer->async_done.
+ * @node: A list node.
+ */
+struct scmi_xfer_raw_waiter {
+ unsigned long start_jiffies;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer *xfer;
+ struct completion async_response;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_raw_buffer - Structure to hold a full SCMI message
+ *
+ * @max_len: The maximum allowed message size (header included) that can be
+ * stored into @msg
+ * @msg: A message buffer used to collect a full message grabbed from an xfer.
+ * @node: A list node.
+ */
+struct scmi_raw_buffer {
+ size_t max_len;
+ struct scmi_msg msg;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
+ * layer
+ *
+ * @chan_id: The preferred channel to use: if zero the channel is automatically
+ * selected based on protocol.
+ * @raw: A reference to the Raw instance.
+ * @tx: A message buffer used to collect TX message on write.
+ * @tx_size: The effective size of the TX message.
+ * @tx_req_size: The final expected size of the complete TX message.
+ * @rx: A message buffer to collect RX message on read.
+ * @rx_size: The effective size of the RX message.
+ */
+struct scmi_dbg_raw_data {
+ u8 chan_id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_msg tx;
+ size_t tx_size;
+ size_t tx_req_size;
+ struct scmi_msg rx;
+ size_t rx_size;
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
+ unsigned int chan_id)
+{
+ if (!chan_id)
+ return raw->q[idx];
+
+ return xa_load(&raw->chans_q, chan_id);
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb = NULL;
+ struct list_head *head = &q->free_bufs;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ if (!list_empty(head)) {
+ rb = list_first_entry(head, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ list_add_tail(&rb->node, &q->free_bufs);
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+}
+
+static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ list_add_tail(&rb->node, &q->msg_q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ wake_up_interruptible(&q->wq);
+}
+
+static struct scmi_raw_buffer*
+scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb = NULL;
+
+ if (!list_empty(&q->msg_q)) {
+ rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+
+ return rb;
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb;
+
+ do {
+ rb = scmi_raw_buffer_dequeue(q);
+ if (rb)
+ scmi_raw_buffer_put(q, rb);
+ } while (rb);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
+ struct scmi_chan_info *cinfo, bool async)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->free_mtx);
+ if (!list_empty(&raw->free_waiters)) {
+ rw = list_first_entry(&raw->free_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+
+ if (async) {
+ reinit_completion(&rw->async_response);
+ xfer->async_done = &rw->async_response;
+ }
+
+ rw->cinfo = cinfo;
+ rw->xfer = xfer;
+ }
+ mutex_unlock(&raw->free_mtx);
+
+ return rw;
+}
+
+static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ if (rw->xfer) {
+ rw->xfer->async_done = NULL;
+ rw->xfer = NULL;
+ }
+
+ mutex_lock(&raw->free_mtx);
+ list_add_tail(&rw->node, &raw->free_waiters);
+ mutex_unlock(&raw->free_mtx);
+}
+
+static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ /* A timestamp for the deferred worker to know how much this has aged */
+ rw->start_jiffies = jiffies;
+
+ trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
+ rw->xfer->hdr.protocol_id,
+ rw->xfer->hdr.seq,
+ raw->desc->max_rx_timeout_ms,
+ rw->xfer->hdr.poll_completion);
+
+ mutex_lock(&raw->active_mtx);
+ list_add_tail(&rw->node, &raw->active_waiters);
+ mutex_unlock(&raw->active_mtx);
+
+ /* kick waiter work */
+ queue_work(raw->wait_wq, &raw->waiters_work);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->active_mtx);
+ if (!list_empty(&raw->active_waiters)) {
+ rw = list_first_entry(&raw->active_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+ }
+ mutex_unlock(&raw->active_mtx);
+
+ return rw;
+}
+
+/**
+ * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
+ *
+ * @work: A reference to the work.
+ *
+ * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
+ * cannot wait to receive its response (if any) in the context of the injection
+ * routines so as not to leave the userspace write syscall, which delivered the
+ * SCMI message to send, pending until a reply is eventually received.
+ * Userspace should and will poll/wait instead on the read syscalls which will
+ * be in charge of reading a received reply (if any).
+ *
+ * Even though reply messages are collected and reported into the SCMI Raw layer
+ * on the RX path, nonetheless we have to properly wait for their completion as
+ * usual (and async_completion too if needed) in order to properly release the
+ * xfer structure at the end: to do this outside the context of the write/send,
+ * these waiting jobs are delegated to this deferred worker.
+ *
+ * Any sent xfer, to be waited for, is timestamped and queued for later
+ * consumption by this worker: queue aging is accounted for while choosing a
+ * timeout for the completion, BUT we do not really care here if we end up
+ * accidentally waiting for a bit too long.
+ */
+static void scmi_xfer_raw_worker(struct work_struct *work)
+{
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+ unsigned long max_tmo;
+
+ raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
+ dev = raw->handle->dev;
+ max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);
+
+ do {
+ int ret = 0;
+ unsigned int timeout_ms;
+ unsigned long aging;
+ struct scmi_xfer *xfer;
+ struct scmi_xfer_raw_waiter *rw;
+ struct scmi_chan_info *cinfo;
+
+ rw = scmi_xfer_raw_waiter_dequeue(raw);
+ if (!rw)
+ return;
+
+ cinfo = rw->cinfo;
+ xfer = rw->xfer;
+ /*
+ * Waiters are queued by wait-deadline at the end, so some of
+ * them could have been already expired when processed, BUT we
+ * have to check the completion status anyway just in case a
+ * virtually expired (aged) transaction was indeed completed
+ * fine and we'll have to wait for the asynchronous part (if
+ * any): for this reason a 1 ms timeout is used for already
+ * expired/aged xfers.
+ */
+ aging = jiffies - rw->start_jiffies;
+ timeout_ms = max_tmo > aging ?
+ jiffies_to_msecs(max_tmo - aging) : 1;
+
+ ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
+ timeout_ms);
+ if (!ret && xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+
+ if (raw->desc->ops->mark_txdone)
+ raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
+
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+
+ /* Wait also for an async delayed response if needed */
+ if (!ret && xfer->async_done) {
+ unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+
+ if (!wait_for_completion_timeout(xfer->async_done, tmo))
+ dev_err(dev,
+ "timed out in RAW delayed resp - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+ }
+
+ /* Release waiter and xfer */
+ scmi_xfer_raw_put(raw->handle, xfer);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ } while (1);
+}
+
+static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
+{
+ int i;
+
+ dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
+
+ for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
+ scmi_raw_buffer_queue_flush(raw->q[i]);
+}
+
+/**
+ * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
+ * bare SCMI message.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ * header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @p: A pointer to return the initialized Raw xfer.
+ *
+ * After an xfer is picked from the TX pool and filled in with the message
+ * content, the xfer is registered as pending with the core in the usual way
+ * using the original sequence number provided by the user with the message.
+ *
+ * Note that, in case the testing user application is NOT using distinct
+ * sequence-numbers between successive SCMI messages, such registration could
+ * fail temporarily if the previous message, using the same sequence number,
+ * had still not been released; in such a case we just wait and retry.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
+ size_t len, struct scmi_xfer **p)
+{
+ u32 msg_hdr;
+ size_t tx_size;
+ struct scmi_xfer *xfer;
+ int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
+ struct device *dev = raw->handle->dev;
+
+ if (!buf || len < sizeof(u32))
+ return -EINVAL;
+
+ tx_size = len - sizeof(u32);
+ /* Ensure we have sane transfer sizes */
+ if (tx_size > raw->desc->max_msg_size)
+ return -ERANGE;
+
+ xfer = scmi_xfer_raw_get(raw->handle);
+ if (IS_ERR(xfer)) {
+ dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
+ return PTR_ERR(xfer);
+ }
+
+ /* Build xfer from the provided SCMI bare LE message */
+ msg_hdr = le32_to_cpu(*((__le32 *)buf));
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
+ /* Polling not supported */
+ xfer->hdr.poll_completion = false;
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->tx.len = tx_size;
+ xfer->rx.len = raw->desc->max_msg_size;
+ /* Clear the whole TX buffer */
+ memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
+ if (xfer->tx.len)
+ memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
+ *p = xfer;
+
+ /*
+ * In flight registration can temporarily fail in case of Raw messages
+ * if the user injects messages without using monotonically increasing
+ * sequence numbers since, in Raw mode, the xfer (and the token) is
+ * finally released later by a deferred worker. Just retry for a while.
+ */
+ do {
+ ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
+ if (ret) {
+ dev_dbg(dev,
+ "...retrying[%d] inflight registration\n",
+ retry);
+ msleep(raw->desc->max_rx_timeout_ms /
+ SCMI_XFER_RAW_MAX_RETRIES);
+ }
+ } while (ret && --retry);
+
+ if (ret) {
+ dev_warn(dev,
+ "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
+ xfer->hdr.seq, msg_hdr);
+ scmi_xfer_raw_put(raw->handle, xfer);
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
+ *
+ * @raw: A reference to the Raw instance.
+ * @xfer: The xfer to send
+ * @chan_id: The channel ID to use; if zero, the channel is automatically
+ * selected based on the protocol used.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * This function sends a previously built raw xfer using an appropriate channel
+ * and queues the related waiting work.
+ *
+ * Note that we need to know explicitly if the required command is meant to be
+ * asynchronous in kind since we have to properly set up the waiter.
+ * (and deducing this from the payload is weak and does not scale given there is
+ * NOT a common header-flag stating if the command is asynchronous or not)
+ *
+ * Return: 0 on Success
+ */
+static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer *xfer, u8 chan_id,
+ bool async)
+{
+ int ret;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ if (!chan_id)
+ chan_id = xfer->hdr.protocol_id;
+ else
+ xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;
+
+ cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
+ if (IS_ERR(cinfo))
+ return PTR_ERR(cinfo);
+
+ rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
+ if (!rw) {
+ dev_warn(dev, "RAW - Cannot get a free waiter !\n");
+ return -ENOMEM;
+ }
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, raw->desc))
+ xfer->hdr.poll_completion = true;
+
+ reinit_completion(&xfer->done);
+ /* Make sure xfer state update is visible before sending */
+ smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);
+
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
+ ret = raw->desc->ops->send_message(rw->cinfo, xfer);
+ if (ret) {
+ dev_err(dev, "Failed to send RAW message %d\n", ret);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ return ret;
+ }
+
+ trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "cmnd", xfer->hdr.seq,
+ xfer->hdr.status,
+ xfer->tx.buf, xfer->tx.len);
+
+ scmi_xfer_raw_waiter_enqueue(raw, rw);
+
+ return ret;
+}
+
+/**
+ * scmi_raw_message_send - A helper to build and send an SCMI command using
+ * the provided SCMI bare message buffer
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ * header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @chan_id: The channel ID to use.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, u8 chan_id, bool async)
+{
+ int ret;
+ struct scmi_xfer *xfer;
+
+ ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
+ if (ret)
+ scmi_xfer_raw_put(raw->handle, xfer);
+
+ return ret;
+}
+
+static struct scmi_raw_buffer *
+scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ while (list_empty(&q->msg_q)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ if (o_nonblock)
+ return ERR_PTR(-EAGAIN);
+
+ if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
+ return ERR_PTR(-ERESTARTSYS);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ }
+
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+/**
+ * scmi_raw_message_receive - A helper to dequeue and report the next
+ * available enqueued raw message payload that has been collected.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer to get hold of the whole SCMI message received and represented
+ * in little-endian binary format.
+ * @len: Length of @buf.
+ * @size: The effective size of the message copied into @buf
+ * @idx: The index of the queue to pick the next queued message from.
+ * @chan_id: The channel ID to use.
+ * @o_nonblock: A flag to request a non-blocking message dequeue.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, size_t *size,
+ unsigned int idx, unsigned int chan_id,
+ bool o_nonblock)
+{
+ int ret = 0;
+ struct scmi_raw_buffer *rb;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_select(raw, idx, chan_id);
+ if (!q)
+ return -ENODEV;
+
+ rb = scmi_raw_message_dequeue(q, o_nonblock);
+ if (IS_ERR(rb)) {
+ dev_dbg(raw->handle->dev, "RAW - No message available!\n");
+ return PTR_ERR(rb);
+ }
+
+ if (rb->msg.len <= len) {
+ memcpy(buf, rb->msg.buf, rb->msg.len);
+ *size = rb->msg.len;
+ } else {
+ ret = -ENOSPC;
+ }
+
+ scmi_raw_buffer_put(q, rb);
+
+ return ret;
+}
+
+/* SCMI Raw debugfs helpers */
+
+static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos,
+ unsigned int idx)
+{
+ ssize_t cnt;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (!rd->rx_size) {
+ int ret;
+
+ ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
+ &rd->rx_size, idx, rd->chan_id,
+ filp->f_flags & O_NONBLOCK);
+ if (ret) {
+ rd->rx_size = 0;
+ return ret;
+ }
+
+ /* Reset any previous filepos change, including writes */
+ *ppos = 0;
+ } else if (*ppos == rd->rx_size) {
+ /* Return EOF once all the message has been read-out */
+ rd->rx_size = 0;
+ return 0;
+ }
+
+ cnt = simple_read_from_buffer(buf, count, ppos,
+ rd->rx.buf, rd->rx_size);
+
+ return cnt;
+}
+
+static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos,
+ bool async)
+{
+ int ret;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (count > rd->tx.len - rd->tx_size)
+ return -ENOSPC;
+
+ /* On first write attempt @count carries the total full message size. */
+ if (!rd->tx_size)
+ rd->tx_req_size = count;
+
+ /*
+ * Gather a full message, possibly across multiple interrupted writes,
+ * before sending it with a single RAW xfer.
+ */
+ if (rd->tx_size < rd->tx_req_size) {
+ size_t cnt;
+
+ cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
+ buf, count);
+ rd->tx_size += cnt;
+ if (cnt < count)
+ return cnt;
+ }
+
+ ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
+ rd->chan_id, async);
+
+ /* Reset ppos for next message ... */
+ rd->tx_size = 0;
+ *ppos = 0;
+
+ return ret ?: count;
+}
+
+static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
+ struct poll_table_struct *wait,
+ unsigned int idx)
+{
+ unsigned long flags;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+ struct scmi_raw_queue *q;
+ __poll_t mask = 0;
+
+ q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
+ if (!q)
+ return mask;
+
+ poll_wait(filp, &q->wq, wait);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ if (!list_empty(&q->msg_q))
+ mask = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return mask;
+}
+
+static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_REPLY_QUEUE);
+}
+
+static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+}
+
+static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
+}
+
+static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
+{
+ u8 id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_dbg_raw_data *rd;
+ const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
+
+ if (!inode->i_private)
+ return -ENODEV;
+
+ raw = inode->i_private;
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
+ if (!rd->rx.buf) {
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
+ if (!rd->tx.buf) {
+ kfree(rd->rx.buf);
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ /* Grab the channel ID from the debugfs entry name, if any */
+ if (!kstrtou8(id_str, 16, &id))
+ rd->chan_id = id;
+
+ rd->raw = raw;
+ filp->private_data = rd;
+
+ return 0;
+}
+
+static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ kfree(rd->rx.buf);
+ kfree(rd->tx.buf);
+ kfree(rd);
+
+ return 0;
+}
+
+static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ scmi_xfer_raw_reset(rd->raw);
+
+ return count;
+}
+
+static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .write = scmi_dbg_raw_mode_reset_write,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations scmi_dbg_raw_mode_message_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_NOTIF_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_notif_read,
+ .poll = scmi_test_dbg_raw_mode_notif_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_ERRS_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_errors_read,
+ .poll = scmi_test_dbg_raw_mode_errors_poll,
+ .owner = THIS_MODULE,
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_raw_buffer *rb;
+ struct device *dev = raw->handle->dev;
+ struct scmi_raw_queue *q;
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+
+ rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
+ if (!rb)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&q->free_bufs_lock);
+ INIT_LIST_HEAD(&q->free_bufs);
+ for (i = 0; i < raw->tx_max_msg; i++, rb++) {
+ rb->max_len = raw->desc->max_msg_size + sizeof(u32);
+ rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
+ if (!rb->msg.buf)
+ return ERR_PTR(-ENOMEM);
+ scmi_raw_buffer_put(q, rb);
+ }
+
+ spin_lock_init(&q->msg_q_lock);
+ INIT_LIST_HEAD(&q->msg_q);
+ init_waitqueue_head(&q->wq);
+
+ return q;
+}
+
+static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
+ if (!rw)
+ return -ENOMEM;
+
+ raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
+ WQ_UNBOUND | WQ_FREEZABLE |
+ WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
+ if (!raw->wait_wq)
+ return -ENOMEM;
+
+ mutex_init(&raw->free_mtx);
+ INIT_LIST_HEAD(&raw->free_waiters);
+ mutex_init(&raw->active_mtx);
+ INIT_LIST_HEAD(&raw->active_waiters);
+
+ for (i = 0; i < raw->tx_max_msg; i++, rw++) {
+ init_completion(&rw->async_response);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ }
+ INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);
+
+ return 0;
+}
+
+static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ u8 *channels, int num_chans)
+{
+ int ret, idx;
+ void *gid;
+ struct device *dev = raw->handle->dev;
+
+ gid = devres_open_group(dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
+ raw->q[idx] = scmi_raw_queue_init(raw);
+ if (IS_ERR(raw->q[idx])) {
+ ret = PTR_ERR(raw->q[idx]);
+ goto err;
+ }
+ }
+
+ xa_init(&raw->chans_q);
+ if (num_chans > 1) {
+ int i;
+
+ for (i = 0; i < num_chans; i++) {
+ void *xret;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_init(raw);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto err_xa;
+ }
+
+ xret = xa_store(&raw->chans_q, channels[i], q,
+ GFP_KERNEL);
+ if (xa_err(xret)) {
+ dev_err(dev,
+ "Fail to allocate Raw queue 0x%02X\n",
+ channels[i]);
+ ret = xa_err(xret);
+ goto err_xa;
+ }
+ }
+ }
+
+ ret = scmi_xfer_raw_worker_init(raw);
+ if (ret)
+ goto err_xa;
+
+ devres_close_group(dev, gid);
+ raw->gid = gid;
+
+ return 0;
+
+err_xa:
+ xa_destroy(&raw->chans_q);
+err:
+ devres_release_group(dev, gid);
+ return ret;
+}
+
+/**
+ * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @top_dentry: A reference to the top Raw debugfs dentry
+ * @instance_id: The ID of the underlying SCMI platform instance represented by
+ * this Raw instance
+ * @channels: The list of the existing channels
+ * @num_chans: The number of entries in @channels
+ * @desc: Reference to the transport operations
+ * @tx_max_msg: Max number of in-flight messages allowed by the transport
+ *
+ * This function prepares the SCMI Raw stack and creates the debugfs API.
+ *
+ * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
+ */
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg)
+{
+ int ret;
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+
+ if (!handle || !desc)
+ return ERR_PTR(-EINVAL);
+
+ dev = handle->dev;
+ raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return ERR_PTR(-ENOMEM);
+
+ raw->handle = handle;
+ raw->desc = desc;
+ raw->tx_max_msg = tx_max_msg;
+ raw->id = instance_id;
+
+ ret = scmi_raw_mode_setup(raw, channels, num_chans);
+ if (ret) {
+ devm_kfree(dev, raw);
+ return ERR_PTR(ret);
+ }
+
+ raw->dentry = debugfs_create_dir("raw", top_dentry);
+
+ debugfs_create_file("reset", 0200, raw->dentry, raw,
+ &scmi_dbg_raw_mode_reset_fops);
+
+ debugfs_create_file("message", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file("notification", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_notification_fops);
+
+ debugfs_create_file("errors", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_errors_fops);
+
+ /*
+ * Expose per-channel entries if multiple channels available.
+ * Just ignore errors while setting up these interfaces since we
+ * already have working core Raw support anyway.
+ */
+ if (num_chans > 1) {
+ int i;
+ struct dentry *top_chans;
+
+ top_chans = debugfs_create_dir("channels", raw->dentry);
+
+ for (i = 0; i < num_chans; i++) {
+ char cdir[8];
+ struct dentry *chd;
+
+ snprintf(cdir, 8, "0x%02X", channels[i]);
+ chd = debugfs_create_dir(cdir, top_chans);
+
+ debugfs_create_file("message", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+ }
+ }
+
+ dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);
+
+ return raw;
+}
+
+/**
+ * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
+ *
+ * @r: An opaque handle to an initialized SCMI Raw instance
+ */
+void scmi_raw_mode_cleanup(void *r)
+{
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ debugfs_remove_recursive(raw->dentry);
+
+ cancel_work_sync(&raw->waiters_work);
+ destroy_workqueue(raw->wait_wq);
+ xa_destroy(&raw->chans_q);
+}
+
+static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
+ struct scmi_xfer *xfer)
+{
+ __le32 *m;
+ size_t msg_size;
+
+ if (!xfer || !msg || !msg_len)
+ return -EINVAL;
+
+ /* Account for hdr ... */
+ msg_size = xfer->rx.len + sizeof(u32);
+ /* ... and status if needed */
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ msg_size += sizeof(u32);
+
+ if (msg_size > *msg_len)
+ return -ENOSPC;
+
+ m = msg;
+ *m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ *++m = cpu_to_le32(xfer->hdr.status);
+
+ memcpy(++m, xfer->rx.buf, xfer->rx.len);
+
+ *msg_len = msg_size;
+
+ return 0;
+}
+
+/**
+ * scmi_raw_message_report - Helper to report back valid responses/notifications
+ * to raw message requests.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @xfer: The xfer containing the message to be reported
+ * @idx: The index of the queue.
+ * @chan_id: The channel ID to use.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the regular RX
+ * path to save and enqueue the response/notification payload carried by this
+ * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
+ *
+ * This way the caller can free the related xfer immediately afterwards and the
+ * user can read back the raw message payload at its own pace (if ever) without
+ * holding an xfer for too long.
+ */
+void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id)
+{
+ int ret;
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+ struct device *dev;
+ struct scmi_raw_queue *q;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
+ return;
+
+ dev = raw->handle->dev;
+ q = scmi_raw_queue_select(raw, idx,
+ SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+
+ /*
+ * Grab the msg_q_lock upfront to avoid a possible race between
+ * realizing the free list was empty and effectively picking the next
+ * buffer to use from the oldest one enqueued and still unread on this
+ * msg_q.
+ *
+ * Note that these locks are not taken together anywhere else, so there
+ * is no risk of deadlocks due to inversion.
+ */
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_get(q);
+ if (!rb) {
+ /*
+ * Immediate and delayed replies to previously injected Raw
+ * commands MUST be read back from userspace to free the buffers:
+ * if this is not happening, something is seriously broken and
+ * must be fixed at the application level, so complain loudly.
+ */
+ if (idx == SCMI_RAW_REPLY_QUEUE) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ dev_warn(dev,
+ "RAW[%d] - Buffers exhausted. Dropping report.\n",
+ idx);
+ return;
+ }
+
+ /*
+ * Notifications and errors queues are instead handled in a
+ * circular manner: unread old buffers are just overwritten by
+ * newer ones.
+ *
+ * The main reason for this is that notifications originated
+ * by Raw requests cannot be distinguished from normal ones, so
+ * the Raw buffer queues risk being flooded and depleted by
+ * notifications if Raw mode is mistakenly left enabled or when
+ * running in coexistence mode.
+ */
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ if (WARN_ON(!rb)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ return;
+ }
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ dev_warn_once(dev,
+ "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
+ idx);
+ }
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
+ if (ret) {
+ dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
+ scmi_raw_buffer_put(q, rb);
+ return;
+ }
+
+ scmi_raw_buffer_enqueue(q, rb);
+}
+
+static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, u32 msg_hdr)
+{
+ /* Unpack received HDR as it is */
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+
+ memset(xfer->rx.buf, 0x00, xfer->rx.len);
+
+ raw->desc->ops->fetch_response(cinfo, xfer);
+}
+
+/**
+ * scmi_raw_error_report - Helper to report back timed-out or generally
+ * unexpected replies.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @cinfo: A reference to the channel to use to retrieve the broken xfer
+ * @msg_hdr: The SCMI message header of the message to fetch and report
+ * @priv: Any private data related to the xfer.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the RX path in
+ * case of errors to save and enqueue the bad message payload carried by the
+ * message that has just been received.
+ *
+ * Note that we have to manually fetch any available payload into a temporary
+ * xfer to be able to save and enqueue the message, since the regular RX error
+ * path that called this will not have fetched the message payload, having
+ * classified it as an error.
+ */
+void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv)
+{
+ struct scmi_xfer xfer;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ xfer.rx.len = raw->desc->max_msg_size;
+ xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
+ if (!xfer.rx.buf) {
+ dev_info(raw->handle->dev,
+ "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
+ msg_hdr);
+ return;
+ }
+
+ /* Any transport-provided priv must be passed back down to transport */
+ if (priv)
+ /* Ensure priv is visible */
+ smp_store_mb(xfer.priv, priv);
+
+ scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
+ scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);
+
+ kfree(xfer.rx.buf);
+}
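The debugfs entries created by scmi_raw_mode_init() above accept one whole SCMI command per write() and hand back the collected reply (echoed header, status word and payload, as assembled by scmi_xfer_raw_collect()) on read(). The stand-alone userspace sketch below illustrates that flow; the debugfs path, the Base-protocol PROTOCOL_VERSION request and the SCMI header bit layout used here are assumptions drawn from this hunk and the SCMI specification, not guarantees made by this patch.

	#include <endian.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumed location: <debugfs>/scmi/<instance>/raw/message */
		int fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
		uint8_t reply[256];
		uint32_t hdr, status;
		ssize_t n;

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/*
		 * Assumed SCMI header layout: msg_id[7:0], type[9:8],
		 * protocol_id[17:10], token[27:18]. This asks for
		 * PROTOCOL_VERSION (msg_id 0x0) of the Base protocol (0x10).
		 */
		hdr = htole32(0x10u << 10);

		/* One whole command per write(), in little-endian binary format */
		if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
			perror("write");
			close(fd);
			return 1;
		}

		/* Reply carries the echoed header, a le32 status, then the payload */
		n = read(fd, reply, sizeof(reply));
		if (n >= (ssize_t)(2 * sizeof(uint32_t))) {
			memcpy(&status, reply + sizeof(uint32_t), sizeof(status));
			printf("reply: %zd bytes, status %d\n",
			       n, (int32_t)le32toh(status));
		}

		close(fd);
		return 0;
	}

Repeated short reads then return the rest of the message and finally EOF, matching scmi_dbg_raw_mode_common_read() above.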
diff --git a/drivers/firmware/arm_scmi/raw_mode.h b/drivers/firmware/arm_scmi/raw_mode.h
new file mode 100644
index 000000000000..8af756a83fd1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * Raw mode support header.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_RAW_MODE_H
+#define _SCMI_RAW_MODE_H
+
+#include "common.h"
+
+enum {
+ SCMI_RAW_REPLY_QUEUE,
+ SCMI_RAW_NOTIF_QUEUE,
+ SCMI_RAW_ERRS_QUEUE,
+ SCMI_RAW_MAX_QUEUE
+};
+
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg);
+void scmi_raw_mode_cleanup(void *raw);
+
+void scmi_raw_message_report(void *raw, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id);
+void scmi_raw_error_report(void *raw, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv);
+
+#endif /* _SCMI_RAW_MODE_H */
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 87a7b13cf868..93272e4bbd12 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -52,9 +52,9 @@ static irqreturn_t smc_msg_done_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static bool smc_chan_available(struct device *dev, int idx)
+static bool smc_chan_available(struct device_node *of_node, int idx)
{
- struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+ struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
if (!np)
return false;
@@ -171,8 +171,6 @@ static int smc_chan_free(int id, void *p, void *data)
cinfo->transport_info = NULL;
scmi_info->cinfo = NULL;
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 1db975c08896..d68c01cb7aa0 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -385,7 +385,7 @@ static int virtio_link_supplier(struct device *dev)
return 0;
}
-static bool virtio_chan_available(struct device *dev, int idx)
+static bool virtio_chan_available(struct device_node *of_node, int idx)
{
struct scmi_vio_channel *channels, *vioch = NULL;
@@ -489,8 +489,6 @@ static int virtio_chan_free(int id, void *p, void *data)
virtio_break_device(vioch->vqueue->vdev);
scmi_vio_channel_cleanup_sync(vioch);
- scmi_free_channel(cinfo, data, id);
-
return 0;
}
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 77aa5c6398aa..3f5ff9ed668e 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -82,7 +82,7 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
if (!sm_phy_base)
- return 0;
+ return NULL;
return ioremap_cache(sm_phy_base, size);
}
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 9f918b9e6f8f..029e6d117cb8 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -9,7 +9,7 @@
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
index d111833364ba..16cf88acfa8e 100644
--- a/drivers/firmware/qcom_scm-smc.c
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -8,7 +8,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
@@ -52,29 +52,97 @@ static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
-static void __scm_smc_do(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res, bool atomic)
+static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
{
- int retry_count = 0;
+ memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
+
+ resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
+
+ resume->args[1] = QCOM_SCM_ARGS(1);
+
+ resume->args[2] = smc_call_ctx;
+}
+
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+{
+ int ret;
+ struct arm_smccc_res get_wq_res;
+ struct arm_smccc_args get_wq_ctx = {0};
+
+ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+
+ /* Guaranteed to return only success or error, no WAITQ_* */
+ __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
+ ret = get_wq_res.a0;
+ if (ret)
+ return ret;
+
+ *wq_ctx = get_wq_res.a1;
+ *flags = get_wq_res.a2;
+ *more_pending = get_wq_res.a3;
+
+ return 0;
+}
+
+static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq,
+ struct arm_smccc_res *res)
+{
+ int ret;
+ u32 wq_ctx, smc_call_ctx;
+ struct arm_smccc_args resume;
+ struct arm_smccc_args *smc = waitq;
+
+ do {
+ __scm_smc_do_quirk(smc, res);
+
+ if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
+ wq_ctx = res->a1;
+ smc_call_ctx = res->a2;
+
+ ret = qcom_scm_wait_for_wq_completion(wq_ctx);
+ if (ret)
+ return ret;
+
+ fill_wq_resume_args(&resume, smc_call_ctx);
+ smc = &resume;
+ }
+ } while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
+
+ return 0;
+}
+
+static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int ret, retry_count = 0;
if (atomic) {
__scm_smc_do_quirk(smc, res);
- return;
+ return 0;
}
do {
mutex_lock(&qcom_scm_lock);
- __scm_smc_do_quirk(smc, res);
+ ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res);
mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ return ret;
+
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ return 0;
}
@@ -83,7 +151,7 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int i;
+ int i, ret;
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
@@ -135,13 +203,17 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
}
- __scm_smc_do(&smc, &smc_res, atomic);
+ /* The ret error check follows the args_virt cleanup */
+ ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
+ if (ret)
+ return ret;
+
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
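The hunk above introduces a sleep/resume handshake: when firmware answers QCOM_SCM_WAITQ_SLEEP it hands back a wq_ctx plus an smc_call_ctx, the driver parks on a completion until the wake interrupt (wired up in qcom_scm.c below) fires, and then re-enters firmware with a WAITQ_RESUME call carrying the saved smc_call_ctx. The stand-alone sketch below only mirrors that control flow with faked firmware responses; it uses no real SCM interfaces and the fake_* names are invented for illustration.

	#include <stdio.h>

	#define WAITQ_SLEEP	2	/* mirrors QCOM_SCM_WAITQ_SLEEP */

	struct fake_res { long a0, a1, a2; };

	static int fw_busy = 1;			/* fake: first call must sleep */

	/* Stands in for __scm_smc_do_quirk(): the actual SMC into firmware */
	static void fake_smc(int resume, struct fake_res *res)
	{
		if (!resume && fw_busy) {
			res->a0 = WAITQ_SLEEP;
			res->a1 = 0;		/* wq_ctx handed back by firmware */
			res->a2 = 0x1234;	/* smc_call_ctx to echo on resume */
			return;
		}
		res->a0 = 0;			/* success */
	}

	/* Stands in for qcom_scm_wait_for_wq_completion(): IRQ completes it */
	static int wait_for_wake(long wq_ctx)
	{
		printf("sleeping on wq_ctx %ld until the wake interrupt fires\n", wq_ctx);
		fw_busy = 0;			/* fake: the interrupt arrived */
		return 0;
	}

	int main(void)
	{
		struct fake_res res;
		unsigned long call_ctx = 0;
		int resume = 0;

		do {
			fake_smc(resume, &res);
			if (res.a0 == WAITQ_SLEEP) {
				call_ctx = res.a2;	/* saved for the resume call */
				if (wait_for_wake(res.a1))
					return 1;
				resume = 1;	/* next entry is WAITQ_RESUME(call_ctx) */
			}
		} while (res.a0 == WAITQ_SLEEP);

		printf("call completed after resume (ctx 0x%lx), ret %ld\n",
		       call_ctx, res.a0);
		return 0;
	}

The loop shape is the same as __scm_smc_do_quirk_handle_waitq() above: keep re-entering firmware with the resume arguments until it stops asking the caller to sleep.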
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index cdbfe54c8146..468d4d5ab550 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -4,15 +4,18 @@
*/
#include <linux/platform_device.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
@@ -33,6 +36,7 @@ struct qcom_scm {
struct clk *iface_clk;
struct clk *bus_clk;
struct icc_path *path;
+ struct completion waitq_comp;
struct reset_controller_dev reset;
/* control access to the interconnect path */
@@ -63,6 +67,9 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
BIT(2), BIT(1), BIT(4), BIT(6)
};
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
@@ -1325,11 +1332,79 @@ bool qcom_scm_is_available(void)
}
EXPORT_SYMBOL(qcom_scm_is_available);
+static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
+{
+ /*
+ * FW currently only supports a single wq_ctx (zero).
+ * TODO: Update this logic to include dynamic allocation and lookup of
+ * completion structs when FW supports more wq_ctx values.
+ */
+ if (wq_ctx != 0) {
+ dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ complete(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
+{
+ int ret;
+ struct qcom_scm *scm = data;
+ u32 wq_ctx, flags, more_pending = 0;
+
+ do {
+ ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
+ if (ret) {
+ dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
+ goto out;
+ }
+
+ if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
+ flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
+ dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
+ goto out;
+ }
+
+ ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
+ if (ret)
+ goto out;
+ } while (more_pending);
+
+out:
+ return IRQ_HANDLED;
+}
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
unsigned long clks;
- int ret;
+ int irq, ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
@@ -1402,6 +1477,19 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm = scm;
__scm->dev = &pdev->dev;
+ init_completion(&__scm->waitq_comp);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ if (irq != -ENXIO)
+ return irq;
+ } else {
+ ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", __scm);
+ if (ret < 0)
+ return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ }
+
__get_convention();
/*
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index db3d08a01209..e6e512bd57d1 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -60,6 +60,9 @@ struct qcom_scm_res {
u64 result[MAX_QCOM_SCM_RETS];
};
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx);
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
+
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
@@ -129,6 +132,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
+#define QCOM_SCM_SVC_WAITQ 0x24
+#define QCOM_SCM_WAITQ_RESUME 0x02
+#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
@@ -137,6 +144,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_WAITQ_SLEEP 2
static inline int qcom_scm_remap_error(int err)
{
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 129f68d7a6f5..acd83d29c866 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -738,8 +738,31 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
*/
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
- type, value, NULL);
+ u32 reg = (type == PM_TAPDELAY_INPUT) ? SD_ITAPDLY : SD_OTAPDLYSEL;
+ u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16);
+
+ if (value) {
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
+ IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+ }
+
+ /*
+ * Work around completely misdesigned firmware API on Xilinx ZynqMP.
+ * The IOCTL_SET_SD_TAPDELAY firmware call allows the caller to only
+ * ever set IOU_SLCR SD_ITAPDLY Register SD0_ITAPDLYENA/SD1_ITAPDLYENA
+ * bits, but there is no matching call to clear those bits. If those
+ * bits are not cleared, SDMMC tuning may fail.
+ *
+ * Luckily, there are PM_MMIO_READ/PM_MMIO_WRITE calls which seem to
+ * allow complete unrestricted access to all address space, including
+ * IOU_SLCR SD_ITAPDLY Register and all the other registers, access
+ * to which was supposed to be protected by the current firmware API.
+ *
+ * Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter
+ * part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits.
+ */
+ return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 660ba0db8900..d09221f97f71 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 817599766329..ce6b76c45b6f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -8,7 +8,7 @@
#include <linux/ascii85.h>
#include <linux/interconnect.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index e7748461cffc..0752fe373351 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -3,7 +3,7 @@
*/
#include "hdmi.h"
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#define HDCP_REG_ENABLE 0x01
#define HDCP_REG_DISABLE 0x00
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 3d4ffa25e3df..4364c3401ff1 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/mfd/mt6323/registers.h>
#include <linux/mfd/mt6331/registers.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
@@ -90,6 +91,19 @@ static const struct mtk_pmic_regs mt6331_regs = {
.rst_lprst_mask = MTK_PMIC_MT6331_RST_DU_MASK,
};
+static const struct mtk_pmic_regs mt6357_regs = {
+ .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6357_TOPSTATUS,
+ 0x2, MT6357_PSC_TOP_INT_CON0, 0x5,
+ MTK_PMIC_PWRKEY_RST),
+ .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6357_TOPSTATUS,
+ 0x8, MT6357_PSC_TOP_INT_CON0, 0xa,
+ MTK_PMIC_HOMEKEY_INDEX),
+ .pmic_rst_reg = MT6357_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
+};
+
static const struct mtk_pmic_regs mt6358_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
@@ -277,6 +291,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
.compatible = "mediatek,mt6331-keys",
.data = &mt6331_regs,
}, {
+ .compatible = "mediatek,mt6357-keys",
+ .data = &mt6357_regs,
+ }, {
.compatible = "mediatek,mt6358-keys",
.data = &mt6358_regs,
}, {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index 74e9ef2fd580..b5b14108e086 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -4,7 +4,7 @@
*/
#include <linux/of_device.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/ratelimit.h>
#include "arm-smmu.h"
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index d7ad49aa997e..d1b296b95c86 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -7,7 +7,7 @@
#include <linux/adreno-smmu-priv.h>
#include <linux/delay.h>
#include <linux/of_device.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index d7be3adee426..c8b70f476cd8 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -27,7 +27,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 07d4dceb5e72..61ff20a7e935 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -12,7 +12,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/soc/qcom/mdt_loader.h>
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 09cd4318a83d..025bb628aaf3 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -163,14 +163,36 @@ static const struct regmap_access_table rpcif_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
};
+struct rpcif_priv {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *dirmap;
+ struct regmap *regmap;
+ struct reset_control *rstc;
+ struct platform_device *vdev;
+ size_t size;
+ enum rpcif_type type;
+ enum rpcif_data_dir dir;
+ u8 bus_size;
+ u8 xfer_size;
+ void *buffer;
+ u32 xferlen;
+ u32 smcr;
+ u32 smadr;
+ u32 command; /* DRCMR or SMCMR */
+ u32 option; /* DROPR or SMOPR */
+ u32 enable; /* DRENR or SMENR */
+ u32 dummy; /* DRDMCR or SMDMCR */
+ u32 ddr; /* DRDRENR or SMDRENR */
+};
/*
* Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
- * proper width. Requires rpcif.xfer_size to be correctly set before!
+ * proper width. Requires rpcif_priv.xfer_size to be correctly set before!
*/
static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- struct rpcif *rpc = context;
+ struct rpcif_priv *rpc = context;
switch (reg) {
case RPCIF_SMRDR0:
@@ -206,7 +228,7 @@ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
{
- struct rpcif *rpc = context;
+ struct rpcif_priv *rpc = context;
switch (reg) {
case RPCIF_SMWDR0:
@@ -253,39 +275,18 @@ static const struct regmap_config rpcif_regmap_config = {
.volatile_table = &rpcif_volatile_table,
};
-int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
+int rpcif_sw_init(struct rpcif *rpcif, struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
- rpc->dev = dev;
-
- rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
- if (IS_ERR(rpc->base))
- return PTR_ERR(rpc->base);
-
- rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config);
- if (IS_ERR(rpc->regmap)) {
- dev_err(&pdev->dev,
- "failed to init regmap for rpcif, error %ld\n",
- PTR_ERR(rpc->regmap));
- return PTR_ERR(rpc->regmap);
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
- rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(rpc->dirmap))
- return PTR_ERR(rpc->dirmap);
- rpc->size = resource_size(res);
-
- rpc->type = (uintptr_t)of_device_get_match_data(dev);
- rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
-
- return PTR_ERR_OR_ZERO(rpc->rstc);
+ rpcif->dev = dev;
+ rpcif->dirmap = rpc->dirmap;
+ rpcif->size = rpc->size;
+ return 0;
}
EXPORT_SYMBOL(rpcif_sw_init);
-static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
+static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc)
{
regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
@@ -299,15 +300,17 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
}
-int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+int rpcif_hw_init(struct device *dev, bool hyperflash)
{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
u32 dummy;
+ int ret;
- pm_runtime_get_sync(rpc->dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
if (rpc->type == RPCIF_RZ_G2L) {
- int ret;
-
ret = reset_control_reset(rpc->rstc);
if (ret)
return ret;
@@ -356,7 +359,7 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) |
RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7));
- pm_runtime_put(rpc->dev);
+ pm_runtime_put(dev);
rpc->bus_size = hyperflash ? 2 : 1;
@@ -364,7 +367,7 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
}
EXPORT_SYMBOL(rpcif_hw_init);
-static int wait_msg_xfer_end(struct rpcif *rpc)
+static int wait_msg_xfer_end(struct rpcif_priv *rpc)
{
u32 sts;
@@ -373,7 +376,7 @@ static int wait_msg_xfer_end(struct rpcif *rpc)
USEC_PER_SEC);
}
-static u8 rpcif_bits_set(struct rpcif *rpc, u32 nbytes)
+static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes)
{
if (rpc->bus_size == 2)
nbytes /= 2;
@@ -386,9 +389,11 @@ static u8 rpcif_bit_size(u8 buswidth)
return buswidth > 4 ? 2 : ilog2(buswidth);
}
-void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
size_t *len)
{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+
rpc->smcr = 0;
rpc->smadr = 0;
rpc->enable = 0;
@@ -430,8 +435,7 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
if (op->dummy.buswidth) {
rpc->enable |= RPCIF_SMENR_DME;
- rpc->dummy = RPCIF_SMDMCR_DMCYC(op->dummy.ncycles /
- op->dummy.buswidth);
+ rpc->dummy = RPCIF_SMDMCR_DMCYC(op->dummy.ncycles);
}
if (op->option.buswidth) {
@@ -472,12 +476,15 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
}
EXPORT_SYMBOL(rpcif_prepare);
-int rpcif_manual_xfer(struct rpcif *rpc)
+int rpcif_manual_xfer(struct device *dev)
{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
int ret = 0;
- pm_runtime_get_sync(rpc->dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL);
@@ -587,13 +594,13 @@ int rpcif_manual_xfer(struct rpcif *rpc)
}
exit:
- pm_runtime_put(rpc->dev);
+ pm_runtime_put(dev);
return ret;
err_out:
if (reset_control_reset(rpc->rstc))
- dev_err(rpc->dev, "Failed to reset HW\n");
- rpcif_hw_init(rpc, rpc->bus_size == 2);
+ dev_err(dev, "Failed to reset HW\n");
+ rpcif_hw_init(dev, rpc->bus_size == 2);
goto exit;
}
EXPORT_SYMBOL(rpcif_manual_xfer);
@@ -640,15 +647,19 @@ static void memcpy_fromio_readw(void *to,
}
}
-ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
+ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf)
{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
loff_t from = offs & (rpc->size - 1);
size_t size = rpc->size - from;
+ int ret;
if (len > size)
len = size;
- pm_runtime_get_sync(rpc->dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0);
regmap_write(rpc->regmap, RPCIF_DRCR, 0);
@@ -666,7 +677,7 @@ ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
else
memcpy_fromio(buf, rpc->dirmap + from, len);
- pm_runtime_put(rpc->dev);
+ pm_runtime_put(dev);
return len;
}
@@ -674,14 +685,17 @@ EXPORT_SYMBOL(rpcif_dirmap_read);
static int rpcif_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct platform_device *vdev;
struct device_node *flash;
+ struct rpcif_priv *rpc;
+ struct resource *res;
const char *name;
int ret;
- flash = of_get_next_child(pdev->dev.of_node, NULL);
+ flash = of_get_next_child(dev->of_node, NULL);
if (!flash) {
- dev_warn(&pdev->dev, "no flash node found\n");
+ dev_warn(dev, "no flash node found\n");
return -ENODEV;
}
@@ -691,16 +705,45 @@ static int rpcif_probe(struct platform_device *pdev)
name = "rpc-if-hyperflash";
} else {
of_node_put(flash);
- dev_warn(&pdev->dev, "unknown flash type\n");
+ dev_warn(dev, "unknown flash type\n");
return -ENODEV;
}
of_node_put(flash);
+ rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return -ENOMEM;
+
+ rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
+ if (IS_ERR(rpc->base))
+ return PTR_ERR(rpc->base);
+
+ rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config);
+ if (IS_ERR(rpc->regmap)) {
+ dev_err(dev, "failed to init regmap for rpcif, error %ld\n",
+ PTR_ERR(rpc->regmap));
+ return PTR_ERR(rpc->regmap);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+ rpc->dirmap = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rpc->dirmap))
+ return PTR_ERR(rpc->dirmap);
+ rpc->size = resource_size(res);
+
+ rpc->type = (uintptr_t)of_device_get_match_data(dev);
+ rpc->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rpc->rstc))
+ return PTR_ERR(rpc->rstc);
+
vdev = platform_device_alloc(name, pdev->id);
if (!vdev)
return -ENOMEM;
- vdev->dev.parent = &pdev->dev;
- platform_set_drvdata(pdev, vdev);
+ vdev->dev.parent = dev;
+
+ rpc->dev = dev;
+ rpc->vdev = vdev;
+ platform_set_drvdata(pdev, rpc);
ret = platform_device_add(vdev);
if (ret) {
@@ -713,9 +756,9 @@ static int rpcif_probe(struct platform_device *pdev)
static int rpcif_remove(struct platform_device *pdev)
{
- struct platform_device *vdev = platform_get_drvdata(pdev);
+ struct rpcif_priv *rpc = platform_get_drvdata(pdev);
- platform_device_unregister(vdev);
+ platform_device_unregister(rpc->vdev);
return 0;
}
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 31d6266f008c..cef0d3beb63b 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -277,18 +277,13 @@ static int ti_emif_probe(struct platform_device *pdev)
int ret;
struct resource *res;
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
struct ti_emif_data *emif_data;
emif_data = devm_kzalloc(dev, sizeof(*emif_data), GFP_KERNEL);
if (!emif_data)
return -ENOMEM;
- match = of_match_device(ti_emif_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data;
+ emif_data->pm_data.ti_emif_sram_config = (unsigned long) device_get_match_data(&pdev->dev);
emif_data->pm_data.ti_emif_base_addr_virt = devm_platform_get_and_ioremap_resource(pdev,
0,
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 7ccaca1b7cb8..a701132638cf 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -18,7 +18,7 @@
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <uapi/misc/fastrpc.h>
#include <linux/of_reserved_mem.h>
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4ac8651d0b29..8ac81d57a3df 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -13,7 +13,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index d00d30243403..ef32fca5f785 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -56,7 +56,7 @@ static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to,
op.data.nbytes = len;
op.data.buf.in = to;
- rpcif_prepare(rpc, &op, NULL, NULL);
+ rpcif_prepare(rpc->dev, &op, NULL, NULL);
}
static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to,
@@ -70,7 +70,7 @@ static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to,
op.data.nbytes = len;
op.data.buf.out = from;
- rpcif_prepare(rpc, &op, NULL, NULL);
+ rpcif_prepare(rpc->dev, &op, NULL, NULL);
}
static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr)
@@ -81,7 +81,7 @@ static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr)
rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2);
- rpcif_manual_xfer(&hyperbus->rpc);
+ rpcif_manual_xfer(hyperbus->rpc.dev);
return data.x[0];
}
@@ -94,7 +94,7 @@ static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr,
rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2);
- rpcif_manual_xfer(&hyperbus->rpc);
+ rpcif_manual_xfer(hyperbus->rpc.dev);
}
static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to,
@@ -105,7 +105,7 @@ static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to,
rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len);
- rpcif_dirmap_read(&hyperbus->rpc, from, len, to);
+ rpcif_dirmap_read(hyperbus->rpc.dev, from, len, to);
}
static const struct hyperbus_ops rpcif_hb_ops = {
@@ -130,9 +130,9 @@ static int rpcif_hb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hyperbus);
- rpcif_enable_rpm(&hyperbus->rpc);
+ pm_runtime_enable(hyperbus->rpc.dev);
- error = rpcif_hw_init(&hyperbus->rpc, true);
+ error = rpcif_hw_init(hyperbus->rpc.dev, true);
if (error)
goto out_disable_rpm;
@@ -150,7 +150,7 @@ static int rpcif_hb_probe(struct platform_device *pdev)
return 0;
out_disable_rpm:
- rpcif_disable_rpm(&hyperbus->rpc);
+ pm_runtime_disable(hyperbus->rpc.dev);
return error;
}
@@ -160,7 +160,7 @@ static int rpcif_hb_remove(struct platform_device *pdev)
hyperbus_unregister_device(&hyperbus->hbdev);
- rpcif_disable_rpm(&hyperbus->rpc);
+ pm_runtime_disable(hyperbus->rpc.dev);
return 0;
}
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 6cb7bf96a626..4cc8d8d6bc9b 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -16,7 +16,7 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "ipa.h"
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 3f94fbf83702..90f457b8e1fe 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/net.h>
#include <linux/platform_device.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <linux/string.h>
#include <net/sock.h>
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index 2b670ef91deb..6069fedbd8f3 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -83,8 +83,11 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
if (!state)
return -ENOMEM;
- state->regs = syscon_regmap_lookup_by_phandle(dev->of_node,
- "samsung,pmu-syscon");
+ state->regs = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(state->regs))
+ /* Backwards compatible way */
+ state->regs = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,pmu-syscon");
if (IS_ERR(state->regs)) {
dev_err(dev, "Failed to lookup PMU regmap\n");
return PTR_ERR(state->regs);
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index c1df1ef3ee3c..a7f67857e5b2 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -298,7 +298,7 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
- unsigned int i;
+ unsigned int i = 0;
phy_dev = of_device_get_match_data(dev);
if (!phy_dev)
@@ -308,7 +308,10 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
if (!state)
return -ENOMEM;
- for (i = 0; i < phy_dev->num_regmaps; i++) {
+ state->regmaps[i] = syscon_node_to_regmap(dev->parent->of_node);
+ if (!IS_ERR(state->regmaps[i]))
+ i++;
+ for (; i < phy_dev->num_regmaps; i++) {
state->regmaps[i] = syscon_regmap_lookup_by_phandle(np,
phy_dev->regmap_names[i]);
if (IS_ERR(state->regmaps[i]))
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index a69f93e74435..daeb79a9a602 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -14,7 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 6a32f022943b..c78be9f322e6 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -174,6 +174,15 @@ config BATTERY_PMU
Say Y here to expose battery information on Apple machines
through the generic battery class.
+config BATTERY_QCOM_BATTMGR
+ tristate "Qualcomm PMIC GLINK battery manager support"
+ depends on QCOM_PMIC_GLINK
+ select AUXILIARY_BUS
+ help
+ Say Y here to enable the Qualcomm PMIC GLINK power supply driver,
+ which is used on modern Qualcomm platforms to provide battery and
+ power supply information.
+
config BATTERY_OLPC
tristate "One Laptop Per Child battery"
depends on OLPC_EC
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 3aec9bc10f3c..4adbfba02d05 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_BATTERY_GAUGE_LTC2941) += ltc2941-battery-gauge.o
obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
obj-$(CONFIG_BATTERY_LEGO_EV3) += lego_ev3_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
+obj-$(CONFIG_BATTERY_QCOM_BATTMGR) += qcom_battmgr.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_SAMSUNG_SDI) += samsung-sdi-battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
new file mode 100644
index 000000000000..ec31f887184f
--- /dev/null
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -0,0 +1,1411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/power_supply.h>
+#include <linux/soc/qcom/pdr.h>
+#include <linux/soc/qcom/pmic_glink.h>
+#include <linux/math.h>
+#include <linux/units.h>
+
+#define BATTMGR_CHEMISTRY_LEN 4
+#define BATTMGR_STRING_LEN 128
+
+enum qcom_battmgr_variant {
+ QCOM_BATTMGR_SM8350,
+ QCOM_BATTMGR_SC8280XP,
+};
+
+#define BATTMGR_BAT_STATUS 0x1
+
+#define BATTMGR_REQUEST_NOTIFICATION 0x4
+
+#define BATTMGR_NOTIFICATION 0x7
+#define NOTIF_BAT_PROPERTY 0x30
+#define NOTIF_USB_PROPERTY 0x32
+#define NOTIF_WLS_PROPERTY 0x34
+#define NOTIF_BAT_INFO 0x81
+#define NOTIF_BAT_STATUS 0x80
+
+#define BATTMGR_BAT_INFO 0x9
+
+#define BATTMGR_BAT_DISCHARGE_TIME 0xc
+
+#define BATTMGR_BAT_CHARGE_TIME 0xd
+
+#define BATTMGR_BAT_PROPERTY_GET 0x30
+#define BATTMGR_BAT_PROPERTY_SET 0x31
+#define BATT_STATUS 0
+#define BATT_HEALTH 1
+#define BATT_PRESENT 2
+#define BATT_CHG_TYPE 3
+#define BATT_CAPACITY 4
+#define BATT_SOH 5
+#define BATT_VOLT_OCV 6
+#define BATT_VOLT_NOW 7
+#define BATT_VOLT_MAX 8
+#define BATT_CURR_NOW 9
+#define BATT_CHG_CTRL_LIM 10
+#define BATT_CHG_CTRL_LIM_MAX 11
+#define BATT_TEMP 12
+#define BATT_TECHNOLOGY 13
+#define BATT_CHG_COUNTER 14
+#define BATT_CYCLE_COUNT 15
+#define BATT_CHG_FULL_DESIGN 16
+#define BATT_CHG_FULL 17
+#define BATT_MODEL_NAME 18
+#define BATT_TTF_AVG 19
+#define BATT_TTE_AVG 20
+#define BATT_RESISTANCE 21
+#define BATT_POWER_NOW 22
+#define BATT_POWER_AVG 23
+
+#define BATTMGR_USB_PROPERTY_GET 0x32
+#define BATTMGR_USB_PROPERTY_SET 0x33
+#define USB_ONLINE 0
+#define USB_VOLT_NOW 1
+#define USB_VOLT_MAX 2
+#define USB_CURR_NOW 3
+#define USB_CURR_MAX 4
+#define USB_INPUT_CURR_LIMIT 5
+#define USB_TYPE 6
+#define USB_ADAP_TYPE 7
+#define USB_MOISTURE_DET_EN 8
+#define USB_MOISTURE_DET_STS 9
+
+#define BATTMGR_WLS_PROPERTY_GET 0x34
+#define BATTMGR_WLS_PROPERTY_SET 0x35
+#define WLS_ONLINE 0
+#define WLS_VOLT_NOW 1
+#define WLS_VOLT_MAX 2
+#define WLS_CURR_NOW 3
+#define WLS_CURR_MAX 4
+#define WLS_TYPE 5
+#define WLS_BOOST_EN 6
+
+struct qcom_battmgr_enable_request {
+ struct pmic_glink_hdr hdr;
+ __le32 battery_id;
+ __le32 power_state;
+ __le32 low_capacity;
+ __le32 high_capacity;
+};
+
+struct qcom_battmgr_property_request {
+ struct pmic_glink_hdr hdr;
+ __le32 battery;
+ __le32 property;
+ __le32 value;
+};
+
+struct qcom_battmgr_update_request {
+ struct pmic_glink_hdr hdr;
+ u32 battery_id;
+};
+
+struct qcom_battmgr_charge_time_request {
+ struct pmic_glink_hdr hdr;
+ __le32 battery_id;
+ __le32 percent;
+ __le32 reserved;
+};
+
+struct qcom_battmgr_discharge_time_request {
+ struct pmic_glink_hdr hdr;
+ __le32 battery_id;
+ __le32 rate; /* 0 for current rate */
+ __le32 reserved;
+};
+
+struct qcom_battmgr_message {
+ struct pmic_glink_hdr hdr;
+ union {
+ struct {
+ __le32 property;
+ __le32 value;
+ __le32 result;
+ } intval;
+ struct {
+ __le32 property;
+ char model[BATTMGR_STRING_LEN];
+ } strval;
+ struct {
+ /*
+ * 0: mWh
+ * 1: mAh
+ */
+ __le32 power_unit;
+ __le32 design_capacity;
+ __le32 last_full_capacity;
+ /*
+ * 0: non-rechargeable
+ * 1: rechargeable
+ */
+ __le32 battery_tech;
+ __le32 design_voltage; /* mV */
+ __le32 capacity_low;
+ __le32 capacity_warning;
+ __le32 cycle_count;
+ /* thousandths of a percent */
+ __le32 accuracy;
+ __le32 max_sample_time_ms;
+ __le32 min_sample_time_ms;
+ __le32 max_average_interval_ms;
+ __le32 min_average_interval_ms;
+ /* granularity between low and warning */
+ __le32 capacity_granularity1;
+ /* granularity between warning and full */
+ __le32 capacity_granularity2;
+ /*
+ * 0: not swappable
+ * 1: cold swappable
+ * 2: hot swappable
+ */
+ __le32 swappable;
+ __le32 capabilities;
+ char model_number[BATTMGR_STRING_LEN];
+ char serial_number[BATTMGR_STRING_LEN];
+ char battery_type[BATTMGR_STRING_LEN];
+ char oem_info[BATTMGR_STRING_LEN];
+ char battery_chemistry[BATTMGR_CHEMISTRY_LEN];
+ char uid[BATTMGR_STRING_LEN];
+ __le32 critical_bias;
+ u8 day;
+ u8 month;
+ __le16 year;
+ __le32 battery_id;
+ } info;
+ struct {
+ /*
+ * BIT(0) discharging
+ * BIT(1) charging
+ * BIT(2) critical low
+ */
+ __le32 battery_state;
+ /* mWh or mAh, based on info->power_unit */
+ __le32 capacity;
+ __le32 rate;
+ /* mV */
+ __le32 battery_voltage;
+ /*
+ * BIT(0) power online
+ * BIT(1) discharging
+ * BIT(2) charging
+ * BIT(3) battery critical
+ */
+ __le32 power_state;
+ /*
+ * 1: AC
+ * 2: USB
+ * 3: Wireless
+ */
+ __le32 charging_source;
+ __le32 temperature;
+ } status;
+ __le32 time;
+ __le32 notification;
+ };
+};
+
+#define BATTMGR_CHARGING_SOURCE_AC 1
+#define BATTMGR_CHARGING_SOURCE_USB 2
+#define BATTMGR_CHARGING_SOURCE_WIRELESS 3
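	/*
	 * Illustrative sketch only, not part of this patch: one way the
	 * documented battery_state bits and charging_source values from the
	 * status reply above could be decoded. The driver's real mapping to
	 * power-supply properties appears later in this file; the demo_*
	 * names here are made up for the example.
	 */
	static const char *demo_battery_state(u32 battery_state)
	{
		if (battery_state & BIT(2))
			return "critical low";		/* BIT(2): critical low */
		if (battery_state & BIT(1))
			return "charging";		/* BIT(1): charging */
		if (battery_state & BIT(0))
			return "discharging";		/* BIT(0): discharging */
		return "idle";
	}

	static const char *demo_charging_source(u32 source)
	{
		switch (source) {
		case BATTMGR_CHARGING_SOURCE_AC:
			return "AC";
		case BATTMGR_CHARGING_SOURCE_USB:
			return "USB";
		case BATTMGR_CHARGING_SOURCE_WIRELESS:
			return "wireless";
		default:
			return "unknown";
		}
	}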
+
+enum qcom_battmgr_unit {
+ QCOM_BATTMGR_UNIT_mWh = 0,
+ QCOM_BATTMGR_UNIT_mAh = 1
+};
+
+struct qcom_battmgr_info {
+ bool valid;
+
+ bool present;
+ unsigned int charge_type;
+ unsigned int design_capacity;
+ unsigned int last_full_capacity;
+ unsigned int voltage_max_design;
+ unsigned int voltage_max;
+ unsigned int capacity_low;
+ unsigned int capacity_warning;
+ unsigned int cycle_count;
+ unsigned int charge_count;
+ char model_number[BATTMGR_STRING_LEN];
+ char serial_number[BATTMGR_STRING_LEN];
+ char oem_info[BATTMGR_STRING_LEN];
+ unsigned char technology;
+ unsigned char day;
+ unsigned char month;
+ unsigned short year;
+};
+
+struct qcom_battmgr_status {
+ unsigned int status;
+ unsigned int health;
+ unsigned int capacity;
+ unsigned int percent;
+ int current_now;
+ int power_now;
+ unsigned int voltage_now;
+ unsigned int voltage_ocv;
+ unsigned int temperature;
+
+ unsigned int discharge_time;
+ unsigned int charge_time;
+};
+
+struct qcom_battmgr_ac {
+ bool online;
+};
+
+struct qcom_battmgr_usb {
+ bool online;
+ unsigned int voltage_now;
+ unsigned int voltage_max;
+ unsigned int current_now;
+ unsigned int current_max;
+ unsigned int current_limit;
+ unsigned int usb_type;
+};
+
+struct qcom_battmgr_wireless {
+ bool online;
+ unsigned int voltage_now;
+ unsigned int voltage_max;
+ unsigned int current_now;
+ unsigned int current_max;
+};
+
+struct qcom_battmgr {
+ struct device *dev;
+ struct pmic_glink_client *client;
+
+ enum qcom_battmgr_variant variant;
+
+ struct power_supply *ac_psy;
+ struct power_supply *bat_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *wls_psy;
+
+ enum qcom_battmgr_unit unit;
+
+ int error;
+ struct completion ack;
+
+ bool service_up;
+
+ struct qcom_battmgr_info info;
+ struct qcom_battmgr_status status;
+ struct qcom_battmgr_ac ac;
+ struct qcom_battmgr_usb usb;
+ struct qcom_battmgr_wireless wireless;
+
+ struct work_struct enable_work;
+
+ /*
+ * @lock serializes power supply requests to the firmware, which stops
+ * responding if requests are issued concurrently.
+ */
+ struct mutex lock;
+};
+
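+/*
+ * Send a request to the battery manager firmware and wait up to one second
+ * for the reply; the pmic_glink callback records any error and completes
+ * @ack.
+ */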
+static int qcom_battmgr_request(struct qcom_battmgr *battmgr, void *data, size_t len)
+{
+ unsigned long left;
+ int ret;
+
+ reinit_completion(&battmgr->ack);
+
+ battmgr->error = 0;
+
+ ret = pmic_glink_send(battmgr->client, data, len);
+ if (ret < 0)
+ return ret;
+
+ left = wait_for_completion_timeout(&battmgr->ack, HZ);
+ if (!left)
+ return -ETIMEDOUT;
+
+ return battmgr->error;
+}
+
+static int qcom_battmgr_request_property(struct qcom_battmgr *battmgr, int opcode,
+ int property, u32 value)
+{
+ struct qcom_battmgr_property_request request = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
+ .hdr.opcode = cpu_to_le32(opcode),
+ .battery = cpu_to_le32(0),
+ .property = cpu_to_le32(property),
+ .value = cpu_to_le32(value),
+ };
+
+ return qcom_battmgr_request(battmgr, &request, sizeof(request));
+}
+
+static int qcom_battmgr_update_status(struct qcom_battmgr *battmgr)
+{
+ struct qcom_battmgr_update_request request = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
+ .hdr.opcode = cpu_to_le32(BATTMGR_BAT_STATUS),
+ .battery_id = cpu_to_le32(0),
+ };
+
+ return qcom_battmgr_request(battmgr, &request, sizeof(request));
+}
+
+static int qcom_battmgr_update_info(struct qcom_battmgr *battmgr)
+{
+ struct qcom_battmgr_update_request request = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
+ .hdr.opcode = cpu_to_le32(BATTMGR_BAT_INFO),
+ .battery_id = cpu_to_le32(0),
+ };
+
+ return qcom_battmgr_request(battmgr, &request, sizeof(request));
+}
+
+static int qcom_battmgr_update_charge_time(struct qcom_battmgr *battmgr)
+{
+ struct qcom_battmgr_charge_time_request request = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
+ .hdr.opcode = cpu_to_le32(BATTMGR_BAT_CHARGE_TIME),
+ .battery_id = cpu_to_le32(0),
+ .percent = cpu_to_le32(100),
+ };
+
+ return qcom_battmgr_request(battmgr, &request, sizeof(request));
+}
+
+static int qcom_battmgr_update_discharge_time(struct qcom_battmgr *battmgr)
+{
+ struct qcom_battmgr_discharge_time_request request = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
+ .hdr.opcode = cpu_to_le32(BATTMGR_BAT_DISCHARGE_TIME),
+ .battery_id = cpu_to_le32(0),
+ .rate = cpu_to_le32(0),
+ };
+
+ return qcom_battmgr_request(battmgr, &request, sizeof(request));
+}
+
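+/* Map power-supply properties to the SM8350 battery manager property IDs */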
+static const u8 sm8350_bat_prop_map[] = {
+ [POWER_SUPPLY_PROP_STATUS] = BATT_STATUS,
+ [POWER_SUPPLY_PROP_HEALTH] = BATT_HEALTH,
+ [POWER_SUPPLY_PROP_PRESENT] = BATT_PRESENT,
+ [POWER_SUPPLY_PROP_CHARGE_TYPE] = BATT_CHG_TYPE,
+ [POWER_SUPPLY_PROP_CAPACITY] = BATT_CAPACITY,
+ [POWER_SUPPLY_PROP_VOLTAGE_OCV] = BATT_VOLT_OCV,
+ [POWER_SUPPLY_PROP_VOLTAGE_NOW] = BATT_VOLT_NOW,
+ [POWER_SUPPLY_PROP_VOLTAGE_MAX] = BATT_VOLT_MAX,
+ [POWER_SUPPLY_PROP_CURRENT_NOW] = BATT_CURR_NOW,
+ [POWER_SUPPLY_PROP_TEMP] = BATT_TEMP,
+ [POWER_SUPPLY_PROP_TECHNOLOGY] = BATT_TECHNOLOGY,
+ [POWER_SUPPLY_PROP_CHARGE_COUNTER] = BATT_CHG_COUNTER,
+ [POWER_SUPPLY_PROP_CYCLE_COUNT] = BATT_CYCLE_COUNT,
+ [POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN] = BATT_CHG_FULL_DESIGN,
+ [POWER_SUPPLY_PROP_CHARGE_FULL] = BATT_CHG_FULL,
+ [POWER_SUPPLY_PROP_MODEL_NAME] = BATT_MODEL_NAME,
+ [POWER_SUPPLY_PROP_TIME_TO_FULL_AVG] = BATT_TTF_AVG,
+ [POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG] = BATT_TTE_AVG,
+ [POWER_SUPPLY_PROP_POWER_NOW] = BATT_POWER_NOW,
+};
+
+static int qcom_battmgr_bat_sm8350_update(struct qcom_battmgr *battmgr,
+ enum power_supply_property psp)
+{
+ unsigned int prop;
+ int ret;
+
+ if (psp >= ARRAY_SIZE(sm8350_bat_prop_map))
+ return -EINVAL;
+
+ prop = sm8350_bat_prop_map[psp];
+
+ mutex_lock(&battmgr->lock);
+ ret = qcom_battmgr_request_property(battmgr, BATTMGR_BAT_PROPERTY_GET, prop, 0);
+ mutex_unlock(&battmgr->lock);
+
+ return ret;
+}
+
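+/*
+ * The sc8280xp firmware returns complete info/status structures, so refresh
+ * the cached battery information and status before reporting a property;
+ * charge and discharge time estimates are only fetched when requested.
+ */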
+static int qcom_battmgr_bat_sc8280xp_update(struct qcom_battmgr *battmgr,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ mutex_lock(&battmgr->lock);
+
+ if (!battmgr->info.valid) {
+ ret = qcom_battmgr_update_info(battmgr);
+ if (ret < 0)
+ goto out_unlock;
+ battmgr->info.valid = true;
+ }
+
+ ret = qcom_battmgr_update_status(battmgr);
+ if (ret < 0)
+ goto out_unlock;
+
+ if (psp == POWER_SUPPLY_PROP_TIME_TO_FULL_AVG) {
+ ret = qcom_battmgr_update_charge_time(battmgr);
+ if (ret < 0) {
+ ret = -ENODATA;
+ goto out_unlock;
+ }
+ }
+
+ if (psp == POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG) {
+ ret = qcom_battmgr_update_discharge_time(battmgr);
+ if (ret < 0) {
+ ret = -ENODATA;
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&battmgr->lock);
+ return ret;
+}
+
+static int qcom_battmgr_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct qcom_battmgr *battmgr = power_supply_get_drvdata(psy);
+ enum qcom_battmgr_unit unit = battmgr->unit;
+ int ret;
+
+ if (!battmgr->service_up)
+ return -ENODEV;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+ else
+ ret = qcom_battmgr_bat_sm8350_update(battmgr, psp);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = battmgr->status.status;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = battmgr->info.charge_type;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = battmgr->status.health;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = battmgr->info.present;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = battmgr->info.technology;
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = battmgr->info.cycle_count;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = battmgr->info.voltage_max_design;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = battmgr->info.voltage_max;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = battmgr->status.voltage_now;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = battmgr->status.voltage_ocv;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battmgr->status.current_now;
+ break;
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ val->intval = battmgr->status.power_now;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ if (unit != QCOM_BATTMGR_UNIT_mAh)
+ return -ENODATA;
+ val->intval = battmgr->info.design_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ if (unit != QCOM_BATTMGR_UNIT_mAh)
+ return -ENODATA;
+ val->intval = battmgr->info.last_full_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY:
+ if (unit != QCOM_BATTMGR_UNIT_mAh)
+ return -ENODATA;
+ val->intval = battmgr->info.capacity_low;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (unit != QCOM_BATTMGR_UNIT_mAh)
+ return -ENODATA;
+ val->intval = battmgr->status.capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = battmgr->info.charge_count;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ if (unit != QCOM_BATTMGR_UNIT_mWh)
+ return -ENODATA;
+ val->intval = battmgr->info.design_capacity;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ if (unit != QCOM_BATTMGR_UNIT_mWh)
+ return -ENODATA;
+ val->intval = battmgr->info.last_full_capacity;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_EMPTY:
+ if (unit != QCOM_BATTMGR_UNIT_mWh)
+ return -ENODATA;
+ val->intval = battmgr->info.capacity_low;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ if (unit != QCOM_BATTMGR_UNIT_mWh)
+ return -ENODATA;
+ val->intval = battmgr->status.capacity;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = battmgr->status.percent;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = battmgr->status.temperature;
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ val->intval = battmgr->status.discharge_time;
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ val->intval = battmgr->status.charge_time;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ val->intval = battmgr->info.year;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_MONTH:
+ val->intval = battmgr->info.month;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_DAY:
+ val->intval = battmgr->info.day;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = battmgr->info.model_number;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = battmgr->info.oem_info;
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = battmgr->info.serial_number;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const enum power_supply_property sc8280xp_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_EMPTY,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+static const struct power_supply_desc sc8280xp_bat_psy_desc = {
+ .name = "qcom-battmgr-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = sc8280xp_bat_props,
+ .num_properties = ARRAY_SIZE(sc8280xp_bat_props),
+ .get_property = qcom_battmgr_bat_get_property,
+};
+
+static const enum power_supply_property sm8350_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_POWER_NOW,
+};
+
+static const struct power_supply_desc sm8350_bat_psy_desc = {
+ .name = "qcom-battmgr-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = sm8350_bat_props,
+ .num_properties = ARRAY_SIZE(sm8350_bat_props),
+ .get_property = qcom_battmgr_bat_get_property,
+};
+
+static int qcom_battmgr_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct qcom_battmgr *battmgr = power_supply_get_drvdata(psy);
+ int ret;
+
+ if (!battmgr->service_up)
+ return -ENODEV;
+
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = battmgr->ac.online;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const enum power_supply_property sc8280xp_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc sc8280xp_ac_psy_desc = {
+ .name = "qcom-battmgr-ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = sc8280xp_ac_props,
+ .num_properties = ARRAY_SIZE(sc8280xp_ac_props),
+ .get_property = qcom_battmgr_ac_get_property,
+};
+
+static const u8 sm8350_usb_prop_map[] = {
+ [POWER_SUPPLY_PROP_ONLINE] = USB_ONLINE,
+ [POWER_SUPPLY_PROP_VOLTAGE_NOW] = USB_VOLT_NOW,
+ [POWER_SUPPLY_PROP_VOLTAGE_MAX] = USB_VOLT_MAX,
+ [POWER_SUPPLY_PROP_CURRENT_NOW] = USB_CURR_NOW,
+ [POWER_SUPPLY_PROP_CURRENT_MAX] = USB_CURR_MAX,
+ [POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT] = USB_INPUT_CURR_LIMIT,
+ [POWER_SUPPLY_PROP_USB_TYPE] = USB_TYPE,
+};
+
+static int qcom_battmgr_usb_sm8350_update(struct qcom_battmgr *battmgr,
+ enum power_supply_property psp)
+{
+ unsigned int prop;
+ int ret;
+
+ if (psp >= ARRAY_SIZE(sm8350_usb_prop_map))
+ return -EINVAL;
+
+ prop = sm8350_usb_prop_map[psp];
+
+ mutex_lock(&battmgr->lock);
+ ret = qcom_battmgr_request_property(battmgr, BATTMGR_USB_PROPERTY_GET, prop, 0);
+ mutex_unlock(&battmgr->lock);
+
+ return ret;
+}
+
+static int qcom_battmgr_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct qcom_battmgr *battmgr = power_supply_get_drvdata(psy);
+ int ret;
+
+ if (!battmgr->service_up)
+ return -ENODEV;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+ else
+ ret = qcom_battmgr_usb_sm8350_update(battmgr, psp);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = battmgr->usb.online;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = battmgr->usb.voltage_now;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = battmgr->usb.voltage_max;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battmgr->usb.current_now;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = battmgr->usb.current_max;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ val->intval = battmgr->usb.current_limit;
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = battmgr->usb.usb_type;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const enum power_supply_usb_type usb_psy_supported_types[] = {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_ACA,
+ POWER_SUPPLY_USB_TYPE_C,
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_PD_DRP,
+ POWER_SUPPLY_USB_TYPE_PD_PPS,
+ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID,
+};
+
+static const enum power_supply_property sc8280xp_usb_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc sc8280xp_usb_psy_desc = {
+ .name = "qcom-battmgr-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = sc8280xp_usb_props,
+ .num_properties = ARRAY_SIZE(sc8280xp_usb_props),
+ .get_property = qcom_battmgr_usb_get_property,
+ .usb_types = usb_psy_supported_types,
+ .num_usb_types = ARRAY_SIZE(usb_psy_supported_types),
+};
+
+static const enum power_supply_property sm8350_usb_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static const struct power_supply_desc sm8350_usb_psy_desc = {
+ .name = "qcom-battmgr-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = sm8350_usb_props,
+ .num_properties = ARRAY_SIZE(sm8350_usb_props),
+ .get_property = qcom_battmgr_usb_get_property,
+ .usb_types = usb_psy_supported_types,
+ .num_usb_types = ARRAY_SIZE(usb_psy_supported_types),
+};
+
+static const u8 sm8350_wls_prop_map[] = {
+ [POWER_SUPPLY_PROP_ONLINE] = WLS_ONLINE,
+ [POWER_SUPPLY_PROP_VOLTAGE_NOW] = WLS_VOLT_NOW,
+ [POWER_SUPPLY_PROP_VOLTAGE_MAX] = WLS_VOLT_MAX,
+ [POWER_SUPPLY_PROP_CURRENT_NOW] = WLS_CURR_NOW,
+ [POWER_SUPPLY_PROP_CURRENT_MAX] = WLS_CURR_MAX,
+};
+
+static int qcom_battmgr_wls_sm8350_update(struct qcom_battmgr *battmgr,
+ enum power_supply_property psp)
+{
+ unsigned int prop;
+ int ret;
+
+ if (psp >= ARRAY_SIZE(sm8350_wls_prop_map))
+ return -EINVAL;
+
+ prop = sm8350_wls_prop_map[psp];
+
+ mutex_lock(&battmgr->lock);
+ ret = qcom_battmgr_request_property(battmgr, BATTMGR_WLS_PROPERTY_GET, prop, 0);
+ mutex_unlock(&battmgr->lock);
+
+ return ret;
+}
+
+static int qcom_battmgr_wls_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct qcom_battmgr *battmgr = power_supply_get_drvdata(psy);
+ int ret;
+
+ if (!battmgr->service_up)
+ return -ENODEV;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+ else
+ ret = qcom_battmgr_wls_sm8350_update(battmgr, psp);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = battmgr->wireless.online;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = battmgr->wireless.voltage_now;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = battmgr->wireless.voltage_max;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battmgr->wireless.current_now;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = battmgr->wireless.current_max;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const enum power_supply_property sc8280xp_wls_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc sc8280xp_wls_psy_desc = {
+ .name = "qcom-battmgr-wls",
+ .type = POWER_SUPPLY_TYPE_WIRELESS,
+ .properties = sc8280xp_wls_props,
+ .num_properties = ARRAY_SIZE(sc8280xp_wls_props),
+ .get_property = qcom_battmgr_wls_get_property,
+};
+
+static const enum power_supply_property sm8350_wls_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static const struct power_supply_desc sm8350_wls_psy_desc = {
+ .name = "qcom-battmgr-wls",
+ .type = POWER_SUPPLY_TYPE_WIRELESS,
+ .properties = sm8350_wls_props,
+ .num_properties = ARRAY_SIZE(sm8350_wls_props),
+ .get_property = qcom_battmgr_wls_get_property,
+};
+
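+/* Handle unsolicited notifications and poke the affected power supply */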
+static void qcom_battmgr_notification(struct qcom_battmgr *battmgr,
+ const struct qcom_battmgr_message *msg,
+ int len)
+{
+ size_t payload_len = len - sizeof(struct pmic_glink_hdr);
+ unsigned int notification;
+
+ if (payload_len != sizeof(msg->notification)) {
+ dev_warn(battmgr->dev, "ignoring notification with invalid length\n");
+ return;
+ }
+
+ notification = le32_to_cpu(msg->notification);
+ switch (notification) {
+ case NOTIF_BAT_INFO:
+ battmgr->info.valid = false;
+ fallthrough;
+ case NOTIF_BAT_STATUS:
+ case NOTIF_BAT_PROPERTY:
+ power_supply_changed(battmgr->bat_psy);
+ break;
+ case NOTIF_USB_PROPERTY:
+ power_supply_changed(battmgr->usb_psy);
+ break;
+ case NOTIF_WLS_PROPERTY:
+ power_supply_changed(battmgr->wls_psy);
+ break;
+ default:
+ dev_err(battmgr->dev, "unknown notification: %#x\n", notification);
+ break;
+ }
+}
+
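+/*
+ * Firmware string fields may be either NUL-terminated or length-prefixed
+ * (Pascal-style); convert the latter to NUL-terminated C strings and copy
+ * the rest verbatim.
+ */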
+static void qcom_battmgr_sc8280xp_strcpy(char *dest, const char *src)
+{
+ size_t len = src[0];
+
+ /* Some firmware versions return Pascal-style strings */
+ if (len < BATTMGR_STRING_LEN && len == strnlen(src + 1, BATTMGR_STRING_LEN - 1)) {
+ memcpy(dest, src + 1, len);
+ dest[len] = '\0';
+ } else {
+ memcpy(dest, src, BATTMGR_STRING_LEN);
+ }
+}
+
+static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry)
+{
+ if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN))
+ return POWER_SUPPLY_TECHNOLOGY_LION;
+
+ pr_err("Unknown battery technology '%s'\n", chemistry);
+ return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+}
+
+static unsigned int qcom_battmgr_sc8280xp_convert_temp(unsigned int temperature)
+{
+ return DIV_ROUND_CLOSEST(temperature, 10);
+}
+
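+/* Parse a response from the sc8280xp-style firmware */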
+static void qcom_battmgr_sc8280xp_callback(struct qcom_battmgr *battmgr,
+ const struct qcom_battmgr_message *resp,
+ size_t len)
+{
+ unsigned int opcode = le32_to_cpu(resp->hdr.opcode);
+ unsigned int source;
+ unsigned int state;
+ size_t payload_len = len - sizeof(struct pmic_glink_hdr);
+
+ if (payload_len < sizeof(__le32)) {
+ dev_warn(battmgr->dev, "invalid payload length for %#x: %zd\n",
+ opcode, len);
+ return;
+ }
+
+ switch (opcode) {
+ case BATTMGR_REQUEST_NOTIFICATION:
+ battmgr->error = 0;
+ break;
+ case BATTMGR_BAT_INFO:
+ if (payload_len != sizeof(resp->info)) {
+ dev_warn(battmgr->dev,
+ "invalid payload length for battery information request: %zd\n",
+ payload_len);
+ battmgr->error = -ENODATA;
+ return;
+ }
+
+ battmgr->unit = le32_to_cpu(resp->info.power_unit);
+
+ battmgr->info.present = true;
+ battmgr->info.design_capacity = le32_to_cpu(resp->info.design_capacity) * 1000;
+ battmgr->info.last_full_capacity = le32_to_cpu(resp->info.last_full_capacity) * 1000;
+ battmgr->info.voltage_max_design = le32_to_cpu(resp->info.design_voltage) * 1000;
+ battmgr->info.capacity_low = le32_to_cpu(resp->info.capacity_low) * 1000;
+ battmgr->info.cycle_count = le32_to_cpu(resp->info.cycle_count);
+ qcom_battmgr_sc8280xp_strcpy(battmgr->info.model_number, resp->info.model_number);
+ qcom_battmgr_sc8280xp_strcpy(battmgr->info.serial_number, resp->info.serial_number);
+ battmgr->info.technology = qcom_battmgr_sc8280xp_parse_technology(resp->info.battery_chemistry);
+ qcom_battmgr_sc8280xp_strcpy(battmgr->info.oem_info, resp->info.oem_info);
+ battmgr->info.day = resp->info.day;
+ battmgr->info.month = resp->info.month;
+ battmgr->info.year = le16_to_cpu(resp->info.year);
+ break;
+ case BATTMGR_BAT_STATUS:
+ if (payload_len != sizeof(resp->status)) {
+ dev_warn(battmgr->dev,
+ "invalid payload length for battery status request: %zd\n",
+ payload_len);
+ battmgr->error = -ENODATA;
+ return;
+ }
+
+ state = le32_to_cpu(resp->status.battery_state);
+ if (state & BIT(0))
+ battmgr->status.status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (state & BIT(1))
+ battmgr->status.status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ battmgr->status.status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ battmgr->status.capacity = le32_to_cpu(resp->status.capacity) * 1000;
+ battmgr->status.power_now = le32_to_cpu(resp->status.rate) * 1000;
+ battmgr->status.voltage_now = le32_to_cpu(resp->status.battery_voltage) * 1000;
+ battmgr->status.temperature = qcom_battmgr_sc8280xp_convert_temp(le32_to_cpu(resp->status.temperature));
+
+ source = le32_to_cpu(resp->status.charging_source);
+ battmgr->ac.online = source == BATTMGR_CHARGING_SOURCE_AC;
+ battmgr->usb.online = source == BATTMGR_CHARGING_SOURCE_USB;
+ battmgr->wireless.online = source == BATTMGR_CHARGING_SOURCE_WIRELESS;
+ break;
+ case BATTMGR_BAT_DISCHARGE_TIME:
+ battmgr->status.discharge_time = le32_to_cpu(resp->time);
+ break;
+ case BATTMGR_BAT_CHARGE_TIME:
+ battmgr->status.charge_time = le32_to_cpu(resp->time);
+ break;
+ default:
+ dev_warn(battmgr->dev, "unknown message %#x\n", opcode);
+ break;
+ }
+
+ complete(&battmgr->ack);
+}
+
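+/* Parse a response from the SM8350-style firmware, one property per reply */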
+static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
+ const struct qcom_battmgr_message *resp,
+ size_t len)
+{
+ unsigned int property;
+ unsigned int opcode = le32_to_cpu(resp->hdr.opcode);
+ size_t payload_len = len - sizeof(struct pmic_glink_hdr);
+ unsigned int val;
+
+ if (payload_len < sizeof(__le32)) {
+ dev_warn(battmgr->dev, "invalid payload length for %#x: %zd\n",
+ opcode, len);
+ return;
+ }
+
+ switch (opcode) {
+ case BATTMGR_BAT_PROPERTY_GET:
+ property = le32_to_cpu(resp->intval.property);
+ if (property == BATT_MODEL_NAME) {
+ if (payload_len != sizeof(resp->strval)) {
+ dev_warn(battmgr->dev,
+ "invalid payload length for BATT_MODEL_NAME request: %zd\n",
+ payload_len);
+ battmgr->error = -ENODATA;
+ return;
+ }
+ } else {
+ if (payload_len != sizeof(resp->intval)) {
+ dev_warn(battmgr->dev,
+ "invalid payload length for %#x request: %zd\n",
+ property, payload_len);
+ battmgr->error = -ENODATA;
+ return;
+ }
+
+ battmgr->error = le32_to_cpu(resp->intval.result);
+ if (battmgr->error)
+ goto out_complete;
+ }
+
+ switch (property) {
+ case BATT_STATUS:
+ battmgr->status.status = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_HEALTH:
+ battmgr->status.health = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_PRESENT:
+ battmgr->info.present = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CHG_TYPE:
+ battmgr->info.charge_type = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CAPACITY:
+ battmgr->status.percent = le32_to_cpu(resp->intval.value) / 100;
+ break;
+ case BATT_VOLT_OCV:
+ battmgr->status.voltage_ocv = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_VOLT_NOW:
+ battmgr->status.voltage_now = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_VOLT_MAX:
+ battmgr->info.voltage_max = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CURR_NOW:
+ battmgr->status.current_now = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_TEMP:
+ val = le32_to_cpu(resp->intval.value);
+ battmgr->status.temperature = DIV_ROUND_CLOSEST(val, 10);
+ break;
+ case BATT_TECHNOLOGY:
+ battmgr->info.technology = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CHG_COUNTER:
+ battmgr->info.charge_count = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CYCLE_COUNT:
+ battmgr->info.cycle_count = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CHG_FULL_DESIGN:
+ battmgr->info.design_capacity = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CHG_FULL:
+ battmgr->info.last_full_capacity = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_MODEL_NAME:
+ strscpy(battmgr->info.model_number, resp->strval.model, BATTMGR_STRING_LEN);
+ break;
+ case BATT_TTF_AVG:
+ battmgr->status.charge_time = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_TTE_AVG:
+ battmgr->status.discharge_time = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_POWER_NOW:
+ battmgr->status.power_now = le32_to_cpu(resp->intval.value);
+ break;
+ default:
+ dev_warn(battmgr->dev, "unknown property %#x\n", property);
+ break;
+ }
+ break;
+ case BATTMGR_USB_PROPERTY_GET:
+ property = le32_to_cpu(resp->intval.property);
+ if (payload_len != sizeof(resp->intval)) {
+ dev_warn(battmgr->dev,
+ "invalid payload length for %#x request: %zd\n",
+ property, payload_len);
+ battmgr->error = -ENODATA;
+ return;
+ }
+
+ battmgr->error = le32_to_cpu(resp->intval.result);
+ if (battmgr->error)
+ goto out_complete;
+
+ switch (property) {
+ case USB_ONLINE:
+ battmgr->usb.online = le32_to_cpu(resp->intval.value);
+ break;
+ case USB_VOLT_NOW:
+ battmgr->usb.voltage_now = le32_to_cpu(resp->intval.value);
+ break;
+ case USB_VOLT_MAX:
+ battmgr->usb.voltage_max = le32_to_cpu(resp->intval.value);
+ break;
+ case USB_CURR_NOW:
+ battmgr->usb.current_now = le32_to_cpu(resp->intval.value);
+ break;
+ case USB_CURR_MAX:
+ battmgr->usb.current_max = le32_to_cpu(resp->intval.value);
+ break;
+ case USB_INPUT_CURR_LIMIT:
+ battmgr->usb.current_limit = le32_to_cpu(resp->intval.value);
+ break;
+ case USB_TYPE:
+ battmgr->usb.usb_type = le32_to_cpu(resp->intval.value);
+ break;
+ default:
+ dev_warn(battmgr->dev, "unknown property %#x\n", property);
+ break;
+ }
+ break;
+ case BATTMGR_WLS_PROPERTY_GET:
+ property = le32_to_cpu(resp->intval.property);
+ if (payload_len != sizeof(resp->intval)) {
+ dev_warn(battmgr->dev,
+ "invalid payload length for %#x request: %zd\n",
+ property, payload_len);
+ battmgr->error = -ENODATA;
+ return;
+ }
+
+ battmgr->error = le32_to_cpu(resp->intval.result);
+ if (battmgr->error)
+ goto out_complete;
+
+ switch (property) {
+ case WLS_ONLINE:
+ battmgr->wireless.online = le32_to_cpu(resp->intval.value);
+ break;
+ case WLS_VOLT_NOW:
+ battmgr->wireless.voltage_now = le32_to_cpu(resp->intval.value);
+ break;
+ case WLS_VOLT_MAX:
+ battmgr->wireless.voltage_max = le32_to_cpu(resp->intval.value);
+ break;
+ case WLS_CURR_NOW:
+ battmgr->wireless.current_now = le32_to_cpu(resp->intval.value);
+ break;
+ case WLS_CURR_MAX:
+ battmgr->wireless.current_max = le32_to_cpu(resp->intval.value);
+ break;
+ default:
+ dev_warn(battmgr->dev, "unknown property %#x\n", property);
+ break;
+ }
+ break;
+ case BATTMGR_REQUEST_NOTIFICATION:
+ battmgr->error = 0;
+ break;
+ default:
+ dev_warn(battmgr->dev, "unknown message %#x\n", opcode);
+ break;
+ }
+
+out_complete:
+ complete(&battmgr->ack);
+}
+
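+/* pmic_glink message handler: dispatch notifications and variant responses */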
+static void qcom_battmgr_callback(const void *data, size_t len, void *priv)
+{
+ const struct pmic_glink_hdr *hdr = data;
+ struct qcom_battmgr *battmgr = priv;
+ unsigned int opcode = le32_to_cpu(hdr->opcode);
+
+ if (opcode == BATTMGR_NOTIFICATION)
+ qcom_battmgr_notification(battmgr, data, len);
+ else if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ qcom_battmgr_sc8280xp_callback(battmgr, data, len);
+ else
+ qcom_battmgr_sm8350_callback(battmgr, data, len);
+}
+
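+/*
+ * Ask the firmware to send unsolicited notifications; scheduled from
+ * qcom_battmgr_pdr_notify() when the service comes up.
+ */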
+static void qcom_battmgr_enable_worker(struct work_struct *work)
+{
+ struct qcom_battmgr *battmgr = container_of(work, struct qcom_battmgr, enable_work);
+ struct qcom_battmgr_enable_request req = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_NOTIFY),
+ .hdr.opcode = cpu_to_le32(BATTMGR_REQUEST_NOTIFICATION),
+ };
+ int ret;
+
+ ret = qcom_battmgr_request(battmgr, &req, sizeof(req));
+ if (ret)
+ dev_err(battmgr->dev, "failed to request power notifications\n");
+}
+
+static void qcom_battmgr_pdr_notify(void *priv, int state)
+{
+ struct qcom_battmgr *battmgr = priv;
+
+ if (state == SERVREG_SERVICE_STATE_UP) {
+ battmgr->service_up = true;
+ schedule_work(&battmgr->enable_work);
+ } else {
+ battmgr->service_up = false;
+ }
+}
+
+static const struct of_device_id qcom_battmgr_of_variants[] = {
+ { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
+ { .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
+ /* Unmatched devices fall back to QCOM_BATTMGR_SM8350 */
+ {}
+};
+
+static char *qcom_battmgr_battery[] = { "battery" };
+
+static int qcom_battmgr_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct power_supply_config psy_cfg_supply = {};
+ struct power_supply_config psy_cfg = {};
+ const struct of_device_id *match;
+ struct qcom_battmgr *battmgr;
+ struct device *dev = &adev->dev;
+
+ battmgr = devm_kzalloc(dev, sizeof(*battmgr), GFP_KERNEL);
+ if (!battmgr)
+ return -ENOMEM;
+
+ battmgr->dev = dev;
+
+ psy_cfg.drv_data = battmgr;
+ psy_cfg.of_node = adev->dev.of_node;
+
+ psy_cfg_supply.drv_data = battmgr;
+ psy_cfg_supply.of_node = adev->dev.of_node;
+ psy_cfg_supply.supplied_to = qcom_battmgr_battery;
+ psy_cfg_supply.num_supplicants = 1;
+
+ INIT_WORK(&battmgr->enable_work, qcom_battmgr_enable_worker);
+ mutex_init(&battmgr->lock);
+ init_completion(&battmgr->ack);
+
+ match = of_match_device(qcom_battmgr_of_variants, dev->parent);
+ if (match)
+ battmgr->variant = (unsigned long)match->data;
+ else
+ battmgr->variant = QCOM_BATTMGR_SM8350;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP) {
+ battmgr->bat_psy = devm_power_supply_register(dev, &sc8280xp_bat_psy_desc, &psy_cfg);
+ if (IS_ERR(battmgr->bat_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
+ "failed to register battery power supply\n");
+
+ battmgr->ac_psy = devm_power_supply_register(dev, &sc8280xp_ac_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->ac_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->ac_psy),
+ "failed to register AC power supply\n");
+
+ battmgr->usb_psy = devm_power_supply_register(dev, &sc8280xp_usb_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->usb_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->usb_psy),
+ "failed to register USB power supply\n");
+
+ battmgr->wls_psy = devm_power_supply_register(dev, &sc8280xp_wls_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->wls_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy),
+ "failed to register wireless charing power supply\n");
+ } else {
+ battmgr->bat_psy = devm_power_supply_register(dev, &sm8350_bat_psy_desc, &psy_cfg);
+ if (IS_ERR(battmgr->bat_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
+ "failed to register battery power supply\n");
+
+ battmgr->usb_psy = devm_power_supply_register(dev, &sm8350_usb_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->usb_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->usb_psy),
+ "failed to register USB power supply\n");
+
+ battmgr->wls_psy = devm_power_supply_register(dev, &sm8350_wls_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->wls_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy),
+ "failed to register wireless charing power supply\n");
+ }
+
+ battmgr->client = devm_pmic_glink_register_client(dev,
+ PMIC_GLINK_OWNER_BATTMGR,
+ qcom_battmgr_callback,
+ qcom_battmgr_pdr_notify,
+ battmgr);
+ return PTR_ERR_OR_ZERO(battmgr->client);
+}
+
+static const struct auxiliary_device_id qcom_battmgr_id_table[] = {
+ { .name = "pmic_glink.power-supply", },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, qcom_battmgr_id_table);
+
+static struct auxiliary_driver qcom_battmgr_driver = {
+ .name = "pmic_glink_power_supply",
+ .probe = qcom_battmgr_probe,
+ .id_table = qcom_battmgr_id_table,
+};
+
+module_auxiliary_driver(qcom_battmgr_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC GLINK battery manager driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 292e22f58df3..ab053084f7a2 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -34,7 +34,7 @@
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#define MPSS_CRASH_REASON_SMEM 421
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 6cc4e13c5d36..0871108fb4dc 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index f599d5d5719d..9d4d04fff8c6 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 5dbb09f843f7..4e176280113a 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -16,12 +16,14 @@ source "drivers/soc/litex/Kconfig"
source "drivers/soc/loongson/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/microchip/Kconfig"
+source "drivers/soc/nuvoton/Kconfig"
source "drivers/soc/pxa/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/samsung/Kconfig"
source "drivers/soc/sifive/Kconfig"
+source "drivers/soc/starfive/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index fff513bd522d..3b0f9fb3b5c8 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -21,13 +21,15 @@ obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
obj-y += loongson/
obj-y += mediatek/
obj-y += microchip/
+obj-y += nuvoton/
obj-y += pxa/
obj-y += amlogic/
obj-y += qcom/
obj-y += renesas/
obj-y += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
-obj-$(CONFIG_SOC_SIFIVE) += sifive/
+obj-y += sifive/
+obj-$(CONFIG_SOC_STARFIVE) += starfive/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index dd5f2a13ceb5..f54acffc83f9 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -46,6 +46,9 @@
#define HHI_NANOQ_MEM_PD_REG1 (0x47 << 2)
#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
+#define G12A_HHI_NANOQ_MEM_PD_REG0 (0x43 << 2)
+#define G12A_HHI_NANOQ_MEM_PD_REG1 (0x44 << 2)
+
struct meson_ee_pwrc;
struct meson_ee_pwrc_domain;
@@ -106,6 +109,13 @@ static struct meson_ee_pwrc_top_domain sm1_pwrc_usb = SM1_EE_PD(17);
static struct meson_ee_pwrc_top_domain sm1_pwrc_pci = SM1_EE_PD(18);
static struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19);
+static struct meson_ee_pwrc_top_domain g12a_pwrc_nna = {
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
+ .sleep_mask = BIT(16) | BIT(17),
+ .iso_reg = GX_AO_RTI_GEN_PWR_ISO0,
+ .iso_mask = BIT(16) | BIT(17),
+};
+
/* Memory PD Domains */
#define VPU_MEMPD(__reg) \
@@ -217,6 +227,11 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(27, 26) },
};
+static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
+ { G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
+ { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) },
+};
+
#define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
{ \
.name = __name, \
@@ -253,6 +268,8 @@ static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
[PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 11, 2),
[PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+ [PWRC_G12A_NNA_ID] = TOP_PD("NNA", &g12a_pwrc_nna, g12a_pwrc_mem_nna,
+ pwrc_ee_is_powered_off),
};
static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
diff --git a/drivers/soc/apple/apple-pmgr-pwrstate.c b/drivers/soc/apple/apple-pmgr-pwrstate.c
index e1122288409a..a3e2bc1d2686 100644
--- a/drivers/soc/apple/apple-pmgr-pwrstate.c
+++ b/drivers/soc/apple/apple-pmgr-pwrstate.c
@@ -116,8 +116,9 @@ static int apple_pmgr_ps_power_off(struct generic_pm_domain *genpd)
static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+ unsigned long flags;
- mutex_lock(&ps->genpd.mlock);
+ spin_lock_irqsave(&ps->genpd.slock, flags);
if (ps->genpd.status == GENPD_STATE_OFF)
dev_err(ps->dev, "PS 0x%x: asserting RESET while powered down\n", ps->offset);
@@ -129,7 +130,7 @@ static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET,
APPLE_PMGR_RESET);
- mutex_unlock(&ps->genpd.mlock);
+ spin_unlock_irqrestore(&ps->genpd.slock, flags);
return 0;
}
@@ -137,8 +138,9 @@ static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned
static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+ unsigned long flags;
- mutex_lock(&ps->genpd.mlock);
+ spin_lock_irqsave(&ps->genpd.slock, flags);
dev_dbg(ps->dev, "PS 0x%x: deassert reset\n", ps->offset);
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 0);
@@ -147,7 +149,7 @@ static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigne
if (ps->genpd.status == GENPD_STATE_OFF)
dev_err(ps->dev, "PS 0x%x: RESET was deasserted while powered down\n", ps->offset);
- mutex_unlock(&ps->genpd.mlock);
+ spin_unlock_irqrestore(&ps->genpd.slock, flags);
return 0;
}
@@ -222,6 +224,7 @@ static int apple_pmgr_ps_probe(struct platform_device *pdev)
return ret;
}
+ ps->genpd.flags |= GENPD_FLAG_IRQ_SAFE;
ps->genpd.name = name;
ps->genpd.power_on = apple_pmgr_ps_power_on;
ps->genpd.power_off = apple_pmgr_ps_power_off;
diff --git a/drivers/soc/apple/rtkit-crashlog.c b/drivers/soc/apple/rtkit-crashlog.c
index 732deed64660..8319e365110b 100644
--- a/drivers/soc/apple/rtkit-crashlog.c
+++ b/drivers/soc/apple/rtkit-crashlog.c
@@ -13,6 +13,17 @@
#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r')
#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x')
#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm')
+#define APPLE_RTKIT_CRASHLOG_REGS FOURCC('C', 'r', 'g', '8')
+
+/* For COMPILE_TEST on non-ARM64 architectures */
+#ifndef PSR_MODE_EL0t
+#define PSR_MODE_EL0t 0x00000000
+#define PSR_MODE_EL1t 0x00000004
+#define PSR_MODE_EL1h 0x00000005
+#define PSR_MODE_EL2t 0x00000008
+#define PSR_MODE_EL2h 0x00000009
+#define PSR_MODE_MASK 0x0000000f
+#endif
struct apple_rtkit_crashlog_header {
u32 fourcc;
@@ -31,6 +42,24 @@ struct apple_rtkit_crashlog_mbox_entry {
};
static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18);
+struct apple_rtkit_crashlog_regs {
+ u32 unk_0;
+ u32 unk_4;
+ u64 regs[31];
+ u64 sp;
+ u64 pc;
+ u64 psr;
+ u64 cpacr;
+ u64 fpsr;
+ u64 fpcr;
+ u64 unk[64];
+ u64 far;
+ u64 unk_X;
+ u64 esr;
+ u64 unk_Z;
+} __packed;
+static_assert(sizeof(struct apple_rtkit_crashlog_regs) == 0x350);
+
static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr,
size_t size)
{
@@ -94,6 +123,66 @@ static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr,
}
}
+static void apple_rtkit_crashlog_dump_regs(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ struct apple_rtkit_crashlog_regs *regs;
+ const char *el;
+ int i;
+
+ if (size < sizeof(*regs)) {
+ dev_warn(rtk->dev, "RTKit: Regs section too small: 0x%zx", size);
+ return;
+ }
+
+ regs = (struct apple_rtkit_crashlog_regs *)bfr;
+
+ switch (regs->psr & PSR_MODE_MASK) {
+ case PSR_MODE_EL0t:
+ el = "EL0t";
+ break;
+ case PSR_MODE_EL1t:
+ el = "EL1t";
+ break;
+ case PSR_MODE_EL1h:
+ el = "EL1h";
+ break;
+ case PSR_MODE_EL2t:
+ el = "EL2t";
+ break;
+ case PSR_MODE_EL2h:
+ el = "EL2h";
+ break;
+ default:
+ el = "unknown";
+ break;
+ }
+
+ dev_warn(rtk->dev, "RTKit: Exception dump:");
+ dev_warn(rtk->dev, " == Exception taken from %s ==", el);
+ dev_warn(rtk->dev, " PSR = 0x%llx", regs->psr);
+ dev_warn(rtk->dev, " PC = 0x%llx\n", regs->pc);
+ dev_warn(rtk->dev, " ESR = 0x%llx\n", regs->esr);
+ dev_warn(rtk->dev, " FAR = 0x%llx\n", regs->far);
+ dev_warn(rtk->dev, " SP = 0x%llx\n", regs->sp);
+ dev_warn(rtk->dev, "\n");
+
+ for (i = 0; i < 31; i += 4) {
+ if (i < 28)
+ dev_warn(rtk->dev,
+ " x%02d-x%02d = %016llx %016llx %016llx %016llx\n",
+ i, i + 3,
+ regs->regs[i], regs->regs[i + 1],
+ regs->regs[i + 2], regs->regs[i + 3]);
+ else
+ dev_warn(rtk->dev,
+ " x%02d-x%02d = %016llx %016llx %016llx\n", i, i + 3,
+ regs->regs[i], regs->regs[i + 1], regs->regs[i + 2]);
+ }
+
+ dev_warn(rtk->dev, "\n");
+}
+
void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
{
size_t offset;
@@ -140,6 +229,10 @@ void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16,
section_size);
break;
+ case APPLE_RTKIT_CRASHLOG_REGS:
+ apple_rtkit_crashlog_dump_regs(rtk, bfr + offset + 16,
+ section_size);
+ break;
default:
dev_warn(rtk->dev,
"RTKit: Unknown crashlog section: %x",
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index 8ec74d7539eb..35ec35aa500d 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -9,6 +9,7 @@
enum {
APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */
APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
+ APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
};
@@ -698,7 +699,7 @@ static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
return 0;
}
-static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
const char *mbox_name, int mbox_idx,
const struct apple_rtkit_ops *ops)
{
@@ -750,6 +751,7 @@ free_rtk:
kfree(rtk);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(apple_rtkit_init);
static int apple_rtkit_wait_for_completion(struct completion *c)
{
@@ -881,6 +883,26 @@ int apple_rtkit_shutdown(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_shutdown);
+int apple_rtkit_idle(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ /* if OFF is used here the co-processor will not wake up again */
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_IDLE);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_IDLE);
+ if (ret)
+ return ret;
+
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_IDLE;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_IDLE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_idle);
+
int apple_rtkit_quiesce(struct apple_rtkit *rtk)
{
int ret;
@@ -926,10 +948,8 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_wake);
-static void apple_rtkit_free(void *data)
+void apple_rtkit_free(struct apple_rtkit *rtk)
{
- struct apple_rtkit *rtk = data;
-
mbox_free_channel(rtk->mbox_chan);
destroy_workqueue(rtk->wq);
@@ -940,6 +960,12 @@ static void apple_rtkit_free(void *data)
kfree(rtk->syslog_msg_buffer);
kfree(rtk);
}
+EXPORT_SYMBOL_GPL(apple_rtkit_free);
+
+static void apple_rtkit_free_wrapper(void *data)
+{
+ apple_rtkit_free(data);
+}
struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
const char *mbox_name, int mbox_idx,
@@ -952,7 +978,7 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
if (IS_ERR(rtk))
return rtk;
- ret = devm_add_action_or_reset(dev, apple_rtkit_free, rtk);
+ ret = devm_add_action_or_reset(dev, apple_rtkit_free_wrapper, rtk);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 5bcd047768b6..bf51f03f77d6 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -701,14 +701,8 @@ fail:
return ret;
}
-static int bcm2835_power_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver bcm2835_power_driver = {
.probe = bcm2835_power_probe,
- .remove = bcm2835_power_remove,
.driver = {
.name = "bcm2835-power",
},
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 4b906791d6c7..a8742fc58f01 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -28,4 +28,15 @@ config SOC_IMX9
help
If you say yes here, you get support for the NXP i.MX9 family
+config IMX8M_BLK_CTRL
+ bool
+ default SOC_IMX8M && IMX_GPCV2_PM_DOMAINS
+ depends on PM_GENERIC_DOMAINS
+ depends on COMMON_CLK
+
+config IMX9_BLK_CTRL
+ bool
+ default SOC_IMX9 && IMX_GPCV2_PM_DOMAINS
+ depends on PM_GENERIC_DOMAINS
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 7b4099ceafd6..a28c44a1f16a 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -5,7 +5,7 @@ endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
-obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o
-obj-$(CONFIG_SOC_IMX8M) += imx8mp-blk-ctrl.o
+obj-$(CONFIG_IMX8M_BLK_CTRL) += imx8m-blk-ctrl.o
+obj-$(CONFIG_IMX8M_BLK_CTRL) += imx8mp-blk-ctrl.o
obj-$(CONFIG_SOC_IMX9) += imx93-src.o imx93-pd.o
-obj-$(CONFIG_SOC_IMX9) += imx93-blk-ctrl.o
+obj-$(CONFIG_IMX9_BLK_CTRL) += imx93-blk-ctrl.o
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index ddcf6be3d8b4..399cb85105a1 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -4,6 +4,7 @@
* Copyright 2021 Pengutronix, Lucas Stach <kernel@pengutronix.de>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/module.h>
@@ -654,6 +655,10 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
.num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data),
};
+#define LCDIF_ARCACHE_CTRL 0x4c
+#define LCDIF_1_RD_HURRY GENMASK(15, 13)
+#define LCDIF_0_RD_HURRY GENMASK(12, 10)
+
static int imx8mp_media_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -667,14 +672,24 @@ static int imx8mp_media_power_notifier(struct notifier_block *nb,
regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8));
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8));
- /*
- * On power up we have no software backchannel to the GPC to
- * wait for the ADB handshake to happen, so we just delay for a
- * bit. On power down the GPC driver waits for the handshake.
- */
- if (action == GENPD_NOTIFY_ON)
+ if (action == GENPD_NOTIFY_ON) {
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
udelay(5);
+ /*
+ * Set panic read hurry level for both LCDIF interfaces to
+ * maximum priority to minimize chances of display FIFO
+ * underflow.
+ */
+ regmap_set_bits(bc->regmap, LCDIF_ARCACHE_CTRL,
+ FIELD_PREP(LCDIF_1_RD_HURRY, 7) |
+ FIELD_PREP(LCDIF_0_RD_HURRY, 7));
+ }
+
return NOTIFY_OK;
}
diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
index 0f13853901df..a0592db8fa86 100644
--- a/drivers/soc/imx/imx8mp-blk-ctrl.c
+++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
@@ -4,7 +4,9 @@
* Copyright 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/module.h>
@@ -21,6 +23,15 @@
#define USB_CLOCK_MODULE_EN BIT(1)
#define PCIE_PHY_APB_RST BIT(4)
#define PCIE_PHY_INIT_RST BIT(5)
+#define GPR_REG1 0x4
+#define PLL_LOCK BIT(13)
+#define GPR_REG2 0x8
+#define P_PLL_MASK GENMASK(5, 0)
+#define M_PLL_MASK GENMASK(15, 6)
+#define S_PLL_MASK GENMASK(18, 16)
+#define GPR_REG3 0xc
+#define PLL_CKE BIT(17)
+#define PLL_RST BIT(31)
struct imx8mp_blk_ctrl_domain;
@@ -60,6 +71,7 @@ struct imx8mp_blk_ctrl_domain {
struct imx8mp_blk_ctrl_data {
int max_reg;
+ int (*probe) (struct imx8mp_blk_ctrl *bc);
notifier_fn_t power_notifier_fn;
void (*power_off) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
void (*power_on) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
@@ -73,6 +85,92 @@ to_imx8mp_blk_ctrl_domain(struct generic_pm_domain *genpd)
return container_of(genpd, struct imx8mp_blk_ctrl_domain, genpd);
}
+struct clk_hsio_pll {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+
+static inline struct clk_hsio_pll *to_clk_hsio_pll(struct clk_hw *hw)
+{
+ return container_of(hw, struct clk_hsio_pll, hw);
+}
+
+static int clk_hsio_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_hsio_pll *clk = to_clk_hsio_pll(hw);
+ u32 val;
+
+ /* set the PLL configuration */
+ regmap_update_bits(clk->regmap, GPR_REG2,
+ P_PLL_MASK | M_PLL_MASK | S_PLL_MASK,
+ FIELD_PREP(P_PLL_MASK, 12) |
+ FIELD_PREP(M_PLL_MASK, 800) |
+ FIELD_PREP(S_PLL_MASK, 4));
+
+ /* de-assert PLL reset */
+ regmap_update_bits(clk->regmap, GPR_REG3, PLL_RST, PLL_RST);
+
+ /* enable PLL */
+ regmap_update_bits(clk->regmap, GPR_REG3, PLL_CKE, PLL_CKE);
+
+ return regmap_read_poll_timeout(clk->regmap, GPR_REG1, val,
+ val & PLL_LOCK, 10, 100);
+}
+
+static void clk_hsio_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_hsio_pll *clk = to_clk_hsio_pll(hw);
+
+ regmap_update_bits(clk->regmap, GPR_REG3, PLL_RST | PLL_CKE, 0);
+}
+
+static int clk_hsio_pll_is_prepared(struct clk_hw *hw)
+{
+ struct clk_hsio_pll *clk = to_clk_hsio_pll(hw);
+
+ return regmap_test_bits(clk->regmap, GPR_REG1, PLL_LOCK);
+}
+
+static unsigned long clk_hsio_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 100000000;
+}
+
+static const struct clk_ops clk_hsio_pll_ops = {
+ .prepare = clk_hsio_pll_prepare,
+ .unprepare = clk_hsio_pll_unprepare,
+ .is_prepared = clk_hsio_pll_is_prepared,
+ .recalc_rate = clk_hsio_pll_recalc_rate,
+};
+
+static int imx8mp_hsio_blk_ctrl_probe(struct imx8mp_blk_ctrl *bc)
+{
+ struct clk_hsio_pll *clk_hsio_pll;
+ struct clk_hw *hw;
+ struct clk_init_data init = {};
+ int ret;
+
+ clk_hsio_pll = devm_kzalloc(bc->dev, sizeof(*clk_hsio_pll), GFP_KERNEL);
+ if (!clk_hsio_pll)
+ return -ENOMEM;
+
+ init.name = "hsio_pll";
+ init.ops = &clk_hsio_pll_ops;
+ init.parent_names = (const char *[]){"osc_24m"};
+ init.num_parents = 1;
+
+ clk_hsio_pll->regmap = bc->regmap;
+ clk_hsio_pll->hw.init = &init;
+
+ hw = &clk_hsio_pll->hw;
+ ret = devm_clk_hw_register(bc->dev, hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(bc->dev, of_clk_hw_simple_get, hw);
+}
+
static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
struct imx8mp_blk_ctrl_domain *domain)
{
@@ -187,6 +285,7 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = {
.max_reg = 0x24,
+ .probe = imx8mp_hsio_blk_ctrl_probe,
.power_on = imx8mp_hsio_blk_ctrl_power_on,
.power_off = imx8mp_hsio_blk_ctrl_power_off,
.power_notifier_fn = imx8mp_hsio_power_notifier,
@@ -201,6 +300,7 @@ static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = {
#define HDMI_RTX_CLK_CTL3 0x70
#define HDMI_RTX_CLK_CTL4 0x80
#define HDMI_TX_CONTROL0 0x200
+#define HDMI_LCDIF_NOC_HURRY_MASK GENMASK(14, 12)
static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
struct imx8mp_blk_ctrl_domain *domain)
@@ -217,6 +317,8 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
BIT(4) | BIT(5) | BIT(6));
+ regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0,
+ FIELD_PREP(HDMI_LCDIF_NOC_HURRY_MASK, 7));
break;
case IMX8MP_HDMIBLK_PD_PAI:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17));
@@ -635,6 +737,12 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
goto cleanup_provider;
}
+ if (bc_data->probe) {
+ ret = bc_data->probe(bc);
+ if (ret)
+ goto cleanup_provider;
+ }
+
dev_set_drvdata(dev, bc);
return 0;
diff --git a/drivers/soc/imx/imx93-pd.c b/drivers/soc/imx/imx93-pd.c
index 4d235c8c4924..832deeed8fd6 100644
--- a/drivers/soc/imx/imx93-pd.c
+++ b/drivers/soc/imx/imx93-pd.c
@@ -164,7 +164,6 @@ MODULE_DEVICE_TABLE(of, imx93_pd_ids);
static struct platform_driver imx93_power_domain_driver = {
.driver = {
.name = "imx93_power_domain",
- .owner = THIS_MODULE,
.of_match_table = imx93_pd_ids,
},
.probe = imx93_pd_probe,
diff --git a/drivers/soc/imx/imx93-src.c b/drivers/soc/imx/imx93-src.c
index 4d74921cae0f..f1c2e22d5cbd 100644
--- a/drivers/soc/imx/imx93-src.c
+++ b/drivers/soc/imx/imx93-src.c
@@ -21,7 +21,6 @@ MODULE_DEVICE_TABLE(of, imx93_src_ids);
static struct platform_driver imx93_src_driver = {
.driver = {
.name = "imx93_src",
- .owner = THIS_MODULE,
.of_match_table = imx93_src_ids,
},
.probe = imx93_src_probe,
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 40d0cc600cae..d6b83a5508ca 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -44,6 +44,11 @@ config MTK_PMIC_WRAP
on different MediaTek SoCs. The PMIC wrapper is a proprietary
hardware to connect the PMIC.
+config MTK_REGULATOR_COUPLER
+ bool "MediaTek SoC Regulator Coupler" if COMPILE_TEST
+ default ARCH_MEDIATEK
+ depends on REGULATOR
+
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
default ARCH_MEDIATEK
@@ -68,7 +73,7 @@ config MTK_SCPSYS_PM_DOMAINS
tasks in the system.
config MTK_MMSYS
- bool "MediaTek MMSYS Support"
+ tristate "MediaTek MMSYS Support"
default ARCH_MEDIATEK
depends on HAS_IOMEM
help
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 0e9e703c931a..8c0ddacbcde8 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
+obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h
index 108af61854a3..fce86f79c505 100644
--- a/drivers/soc/mediatek/mt8186-pm-domains.h
+++ b/drivers/soc/mediatek/mt8186-pm-domains.h
@@ -304,7 +304,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
.ctl_offs = 0x9FC,
.pwr_sta_offs = 0x16C,
.pwr_sta2nd_offs = 0x170,
- .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
},
[MT8186_POWER_DOMAIN_ADSP_INFRA] = {
.name = "adsp_infra",
@@ -312,7 +311,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
.ctl_offs = 0x9F8,
.pwr_sta_offs = 0x16C,
.pwr_sta2nd_offs = 0x170,
- .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
},
[MT8186_POWER_DOMAIN_ADSP_TOP] = {
.name = "adsp_top",
@@ -332,7 +330,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
MT8186_TOP_AXI_PROT_EN_3_CLR,
MT8186_TOP_AXI_PROT_EN_3_STA),
},
- .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
+ .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP,
},
};
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h
new file mode 100644
index 000000000000..448cc3761b43
--- /dev/null
+++ b/drivers/soc/mediatek/mt8188-mmsys.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8188_MMSYS_H
+#define __SOC_MEDIATEK_MT8188_MMSYS_H
+
+#define MT8188_VDO0_OVL_MOUT_EN 0xf14
+#define MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0)
+#define MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1)
+#define MT8188_MOUT_DISP_OVL0_TO_DISP_OVL1 BIT(2)
+#define MT8188_MOUT_DISP_OVL1_TO_DISP_RDMA1 BIT(4)
+#define MT8188_MOUT_DISP_OVL1_TO_DISP_WDMA1 BIT(5)
+#define MT8188_MOUT_DISP_OVL1_TO_DISP_OVL0 BIT(6)
+
+#define MT8188_VDO0_SEL_IN 0xf34
+#define MT8188_VDO0_SEL_OUT 0xf38
+
+#define MT8188_VDO0_DISP_RDMA_SEL 0xf40
+#define MT8188_SOUT_DISP_RDMA0_TO_MASK GENMASK(2, 0)
+#define MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0 (0 << 0)
+#define MT8188_SOUT_DISP_RDMA0_TO_DISP_DSI0 (1 << 0)
+#define MT8188_SOUT_DISP_RDMA0_TO_DISP_DP_INTF0 (5 << 0)
+#define MT8188_SEL_IN_DISP_RDMA0_FROM_MASK GENMASK(8, 8)
+#define MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0 (0 << 8)
+#define MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_RSZ0 (1 << 8)
+
+#define MT8188_VDO0_DSI0_SEL_IN 0xf44
+#define MT8188_SEL_IN_DSI0_FROM_MASK BIT(0)
+#define MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT (0 << 0)
+#define MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0 (1 << 0)
+
+#define MT8188_VDO0_DP_INTF0_SEL_IN 0xf4C
+#define MT8188_SEL_IN_DP_INTF0_FROM_MASK GENMASK(2, 0)
+#define MT8188_SEL_IN_DP_INTF0_FROM_DSC_WRAP0C1_OUT (0 << 0)
+#define MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE (1 << 0)
+#define MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0 (3 << 0)
+
+#define MT8188_VDO0_DISP_DITHER0_SEL_OUT 0xf58
+#define MT8188_SOUT_DISP_DITHER0_TO_MASK GENMASK(2, 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN (0 << 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_DSI0 (1 << 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0 (6 << 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0 (7 << 0)
+
+#define MT8188_VDO0_VPP_MERGE_SEL 0xf60
+#define MT8188_SEL_IN_VPP_MERGE_FROM_MASK GENMASK(1, 0)
+#define MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT (0 << 0)
+#define MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT (3 << 0)
+
+#define MT8188_SOUT_VPP_MERGE_TO_MASK GENMASK(6, 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DSI1 (0 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DP_INTF0 (1 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 (2 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA1 (3 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN (4 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0 (5 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK GENMASK(11, 11)
+#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN (0 << 11)
+
+#define MT8188_VDO0_DSC_WARP_SEL 0xf64
+#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK GENMASK(0, 0)
+#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0 (0 << 0)
+#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_VPP_MERGE (1 << 0)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK GENMASK(19, 16)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0 BIT(16)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 BIT(17)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE BIT(18)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DISP_WDMA0 BIT(19)
+
+static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
+ MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
+ MT8188_VDO0_DSC_WARP_SEL,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK,
+ MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT,
+ MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSI0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT,
+ MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
+ },
+};
+
+#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8188-pm-domains.h b/drivers/soc/mediatek/mt8188-pm-domains.h
new file mode 100644
index 000000000000..0692cb444ed0
--- /dev/null
+++ b/drivers/soc/mediatek/mt8188-pm-domains.h
@@ -0,0 +1,623 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#ifndef __SOC_MEDIATEK_MT8188_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8188_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mediatek,mt8188-power.h>
+
+/*
+ * MT8188 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8188[] = {
+ [MT8188_POWER_DOMAIN_MFG0] = {
+ .name = "mfg0",
+ .sta_mask = BIT(1),
+ .ctl_offs = 0x300,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT8188_POWER_DOMAIN_MFG1] = {
+ .name = "mfg1",
+ .sta_mask = BIT(2),
+ .ctl_offs = 0x304,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MFG1_STEP1,
+ MT8188_TOP_AXI_PROT_EN_SET,
+ MT8188_TOP_AXI_PROT_EN_CLR,
+ MT8188_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_1_MFG1_STEP3,
+ MT8188_TOP_AXI_PROT_EN_1_SET,
+ MT8188_TOP_AXI_PROT_EN_1_CLR,
+ MT8188_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP4,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MFG1_STEP5,
+ MT8188_TOP_AXI_PROT_EN_SET,
+ MT8188_TOP_AXI_PROT_EN_CLR,
+ MT8188_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP6,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT8188_POWER_DOMAIN_MFG2] = {
+ .name = "mfg2",
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x308,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_MFG3] = {
+ .name = "mfg3",
+ .sta_mask = BIT(4),
+ .ctl_offs = 0x30C,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_MFG4] = {
+ .name = "mfg4",
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x310,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_PEXTP_MAC_P0] = {
+ .name = "pextp_mac_p0",
+ .sta_mask = BIT(10),
+ .ctl_offs = 0x324,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_PEXTP_MAC_P0_STEP1,
+ MT8188_TOP_AXI_PROT_EN_SET,
+ MT8188_TOP_AXI_PROT_EN_CLR,
+ MT8188_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_PEXTP_MAC_P0_STEP2,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_PEXTP_PHY_TOP] = {
+ .name = "pextp_phy_top",
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x328,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_CSIRX_TOP] = {
+ .name = "pextp_csirx_top",
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x3C4,
+ .pwr_sta_offs = 0x174,
+ .pwr_sta2nd_offs = 0x178,
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_ETHER] = {
+ .name = "ether",
+ .sta_mask = BIT(1),
+ .ctl_offs = 0x338,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_ETHER_STEP1,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8188_POWER_DOMAIN_HDMI_TX] = {
+ .name = "hdmi_tx",
+ .sta_mask = BIT(18),
+ .ctl_offs = 0x37C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_HDMI_TX_STEP1,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8188_POWER_DOMAIN_ADSP_AO] = {
+ .name = "adsp_ao",
+ .sta_mask = BIT(10),
+ .ctl_offs = 0x35C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP1,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ },
+ .caps = MTK_SCPD_ALWAYS_ON,
+ },
+ [MT8188_POWER_DOMAIN_ADSP_INFRA] = {
+ .name = "adsp_infra",
+ .sta_mask = BIT(9),
+ .ctl_offs = 0x358,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP1,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ },
+ .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ALWAYS_ON,
+ },
+ [MT8188_POWER_DOMAIN_ADSP] = {
+ .name = "adsp",
+ .sta_mask = BIT(8),
+ .ctl_offs = 0x354,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP1,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8188_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = BIT(6),
+ .ctl_offs = 0x34C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP1,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8188_POWER_DOMAIN_AUDIO_ASRC] = {
+ .name = "audio_asrc",
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x350,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP1,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_VPPSYS0] = {
+ .name = "vppsys0",
+ .sta_mask = BIT(11),
+ .ctl_offs = 0x360,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP1,
+ MT8188_TOP_AXI_PROT_EN_SET,
+ MT8188_TOP_AXI_PROT_EN_CLR,
+ MT8188_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP3,
+ MT8188_TOP_AXI_PROT_EN_SET,
+ MT8188_TOP_AXI_PROT_EN_CLR,
+ MT8188_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP4,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0_STEP5,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA),
+ },
+ },
+ [MT8188_POWER_DOMAIN_VDOSYS0] = {
+ .name = "vdosys0",
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x368,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDOSYS0_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_VDOSYS0_STEP2,
+ MT8188_TOP_AXI_PROT_EN_SET,
+ MT8188_TOP_AXI_PROT_EN_CLR,
+ MT8188_TOP_AXI_PROT_EN_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0_STEP3,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA),
+ },
+ },
+ [MT8188_POWER_DOMAIN_VDOSYS1] = {
+ .name = "vdosys1",
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x36C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VDOSYS1_STEP3,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ },
+ [MT8188_POWER_DOMAIN_DP_TX] = {
+ .name = "dp_tx",
+ .sta_mask = BIT(16),
+ .ctl_offs = 0x374,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_DP_TX_STEP1,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_EDP_TX] = {
+ .name = "edp_tx",
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x378,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_EDP_TX_STEP1,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR,
+ MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_VPPSYS1] = {
+ .name = "vppsys1",
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x364,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS1_STEP3,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ },
+ [MT8188_POWER_DOMAIN_WPE] = {
+ .name = "wpe",
+ .sta_mask = BIT(15),
+ .ctl_offs = 0x370,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_VDEC0] = {
+ .name = "vdec0",
+ .sta_mask = BIT(19),
+ .ctl_offs = 0x380,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDEC0_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_VDEC1] = {
+ .name = "vdec1",
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x384,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = BIT(22),
+ .ctl_offs = 0x38C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VENC_STEP3,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_IMG_VCORE] = {
+ .name = "vcore",
+ .sta_mask = BIT(28),
+ .ctl_offs = 0x3A4,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_IMG_VCORE_STEP3,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT8188_POWER_DOMAIN_IMG_MAIN] = {
+ .name = "img_main",
+ .sta_mask = BIT(29),
+ .ctl_offs = 0x3A8,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP2,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_DIP] = {
+ .name = "dip",
+ .sta_mask = BIT(30),
+ .ctl_offs = 0x3AC,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_IPE] = {
+ .name = "ipe",
+ .sta_mask = BIT(31),
+ .ctl_offs = 0x3B0,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_CAM_VCORE] = {
+ .name = "cam_vcore",
+ .sta_mask = BIT(27),
+ .ctl_offs = 0x3A0,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_CAM_VCORE_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_1_CAM_VCORE_STEP3,
+ MT8188_TOP_AXI_PROT_EN_1_SET,
+ MT8188_TOP_AXI_PROT_EN_1_CLR,
+ MT8188_TOP_AXI_PROT_EN_1_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP4,
+ MT8188_TOP_AXI_PROT_EN_MM_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_CAM_VCORE_STEP5,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT8188_POWER_DOMAIN_CAM_MAIN] = {
+ .name = "cam_main",
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x394,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP1,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP2,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP3,
+ MT8188_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8188_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_MM_2_STA),
+ BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP4,
+ MT8188_TOP_AXI_PROT_EN_2_SET,
+ MT8188_TOP_AXI_PROT_EN_2_CLR,
+ MT8188_TOP_AXI_PROT_EN_2_STA),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_CAM_SUBA] = {
+ .name = "cam_suba",
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x398,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8188_POWER_DOMAIN_CAM_SUBB] = {
+ .name = "cam_subb",
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x39C,
+ .pwr_sta_offs = 0x16C,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+};
+
+static const struct scpsys_soc_data mt8188_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8188,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8188),
+};
+
+#endif /* __SOC_MEDIATEK_MT8188_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h
index abfe94a30248..a6652ae63431 100644
--- a/drivers/soc/mediatek/mt8195-mmsys.h
+++ b/drivers/soc/mediatek/mt8195-mmsys.h
@@ -75,6 +75,77 @@
#define MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 (2 << 16)
#define MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE (3 << 16)
+#define MT8195_VDO1_SW0_RST_B 0x1d0
+#define MT8195_VDO1_MERGE0_ASYNC_CFG_WD 0xe30
+#define MT8195_VDO1_HDRBE_ASYNC_CFG_WD 0xe70
+#define MT8195_VDO1_HDR_TOP_CFG 0xd00
+#define MT8195_VDO1_MIXER_IN1_ALPHA 0xd30
+#define MT8195_VDO1_MIXER_IN1_PAD 0xd40
+
+#define MT8195_VDO1_VPP_MERGE0_P0_SEL_IN 0xf04
+#define MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 1
+
+#define MT8195_VDO1_VPP_MERGE0_P1_SEL_IN 0xf08
+#define MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 1
+
+#define MT8195_VDO1_DISP_DPI1_SEL_IN 0xf10
+#define MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+
+#define MT8195_VDO1_DISP_DP_INTF0_SEL_IN 0xf14
+#define MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+
+#define MT8195_VDO1_MERGE4_SOUT_SEL 0xf18
+#define MT8195_MERGE4_SOUT_TO_DPI1_SEL 2
+#define MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL 3
+
+#define MT8195_VDO1_MIXER_IN1_SEL_IN 0xf24
+#define MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN2_SEL_IN 0xf28
+#define MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN3_SEL_IN 0xf2c
+#define MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN4_SEL_IN 0xf30
+#define MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_OUT_SOUT_SEL 0xf34
+#define MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL 1
+
+#define MT8195_VDO1_VPP_MERGE1_P0_SEL_IN 0xf3c
+#define MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 1
+
+#define MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL 0xf40
+#define MT8195_SOUT_TO_MIXER_IN1_SEL 1
+
+#define MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL 0xf44
+#define MT8195_SOUT_TO_MIXER_IN2_SEL 1
+
+#define MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL 0xf48
+#define MT8195_SOUT_TO_MIXER_IN3_SEL 1
+
+#define MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL 0xf4c
+#define MT8195_SOUT_TO_MIXER_IN4_SEL 1
+
+#define MT8195_VDO1_MERGE4_ASYNC_SEL_IN 0xf50
+#define MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN1_SOUT_SEL 0xf58
+#define MT8195_MIXER_IN1_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_IN2_SOUT_SEL 0xf5c
+#define MT8195_MIXER_IN2_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_IN3_SOUT_SEL 0xf60
+#define MT8195_MIXER_IN3_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_IN4_SOUT_SEL 0xf64
+#define MT8195_MIXER_IN4_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_SOUT_SEL_IN 0xf68
+#define MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER 0
+
static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
@@ -367,4 +438,79 @@ static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
}
};
+static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = {
+ {
+ DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
+ }, {
+ DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
+ }, {
+ DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
+ MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
+ }, {
+ DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN1_SEL
+ }, {
+ DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN2_SEL
+ }, {
+ DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN3_SEL
+ }, {
+ DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN4_SEL
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
+ }, {
+ DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
+ MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DPI1_SEL
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL
+ }
+};
#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */
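Each entry in the routing tables above is an (addr, mask, val) triple; here is a small standalone sketch of the read-modify-write that mtk_mmsys_update_bits() (added further below) applies in its MMIO path, using the MERGE5 to DP_INTF1 selector as the example. The "before" register value is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Same read-modify-write as the non-CMDQ path of mtk_mmsys_update_bits(). */
static uint32_t apply_route(uint32_t regval, uint32_t mask, uint32_t val)
{
	return (regval & ~mask) | (val & mask);   /* only the masked field changes */
}

int main(void)
{
	/* e.g. MERGE5 -> DP_INTF1: MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0), sel = 3 */
	uint32_t before = 0xffff0000;             /* hypothetical current register value */
	uint32_t after  = apply_route(before, 0x3, 3);

	printf("0x%08x -> 0x%08x\n", before, after);
	return 0;
}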
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index fc13334db1b1..bad139cb117e 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -276,19 +276,14 @@ static int mtk_devapc_probe(struct platform_device *pdev)
if (!devapc_irq)
return -EINVAL;
- ctx->infra_clk = devm_clk_get(&pdev->dev, "devapc-infra-clock");
+ ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
if (IS_ERR(ctx->infra_clk))
return -EINVAL;
- if (clk_prepare_enable(ctx->infra_clk))
- return -EINVAL;
-
ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
IRQF_TRIGGER_NONE, "devapc", ctx);
- if (ret) {
- clk_disable_unprepare(ctx->infra_clk);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, ctx);
@@ -303,8 +298,6 @@ static int mtk_devapc_remove(struct platform_device *pdev)
stop_devapc(ctx);
- clk_disable_unprepare(ctx->infra_clk);
-
return 0;
}
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index f3431448e843..eb4c7e57896c 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
@@ -16,10 +17,13 @@
#include "mt8167-mmsys.h"
#include "mt8183-mmsys.h"
#include "mt8186-mmsys.h"
+#include "mt8188-mmsys.h"
#include "mt8192-mmsys.h"
#include "mt8195-mmsys.h"
#include "mt8365-mmsys.h"
+#define MMSYS_SW_RESET_PER_REG 32
+
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.clk_driver = "clk-mt2701-mm",
.routes = mmsys_default_routing_table,
@@ -51,6 +55,7 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.routes = mmsys_default_routing_table,
.num_routes = ARRAY_SIZE(mmsys_default_routing_table),
.sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
+ .num_resets = 32,
};
static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
@@ -58,6 +63,7 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.routes = mmsys_mt8183_routing_table,
.num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table),
.sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
+ .num_resets = 32,
};
static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
@@ -65,6 +71,13 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
.routes = mmsys_mt8186_routing_table,
.num_routes = ARRAY_SIZE(mmsys_mt8186_routing_table),
.sw0_rst_offset = MT8186_MMSYS_SW0_RST_B,
+ .num_resets = 32,
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
+ .clk_driver = "clk-mt8188-vdo0",
+ .routes = mmsys_mt8188_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8188_routing_table),
};
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
@@ -72,6 +85,7 @@ static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
.routes = mmsys_mt8192_routing_table,
.num_routes = ARRAY_SIZE(mmsys_mt8192_routing_table),
.sw0_rst_offset = MT8186_MMSYS_SW0_RST_B,
+ .num_resets = 32,
};
static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = {
@@ -80,6 +94,24 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = {
.num_routes = ARRAY_SIZE(mmsys_mt8195_routing_table),
};
+static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = {
+ .clk_driver = "clk-mt8195-vdo1",
+ .routes = mmsys_mt8195_vdo1_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8195_vdo1_routing_table),
+ .sw0_rst_offset = MT8195_VDO1_SW0_RST_B,
+ .num_resets = 64,
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vppsys0_driver_data = {
+ .clk_driver = "clk-mt8195-vpp0",
+ .is_vppsys = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vppsys1_driver_data = {
+ .clk_driver = "clk-mt8195-vpp1",
+ .is_vppsys = true,
+};
+
static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
.clk_driver = "clk-mt8365-mm",
.routes = mt8365_mmsys_routing_table,
@@ -91,24 +123,44 @@ struct mtk_mmsys {
const struct mtk_mmsys_driver_data *data;
spinlock_t lock; /* protects mmsys_sw_rst_b reg */
struct reset_controller_dev rcdev;
+ struct cmdq_client_reg cmdq_base;
};
+static void mtk_mmsys_update_bits(struct mtk_mmsys *mmsys, u32 offset, u32 mask, u32 val,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ u32 tmp;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt) {
+ if (mmsys->cmdq_base.size == 0) {
+ pr_err("mmsys lose gce property, failed to update mmsys bits with cmdq");
+ return;
+ }
+ cmdq_pkt_write_mask(cmdq_pkt, mmsys->cmdq_base.subsys,
+ mmsys->cmdq_base.offset + offset, val,
+ mask);
+ return;
+ }
+#endif
+
+ tmp = readl_relaxed(mmsys->regs + offset);
+ tmp = (tmp & ~mask) | (val & mask);
+ writel_relaxed(tmp, mmsys->regs + offset);
+}
+
void mtk_mmsys_ddp_connect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
{
struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
const struct mtk_mmsys_routes *routes = mmsys->data->routes;
- u32 reg;
int i;
for (i = 0; i < mmsys->data->num_routes; i++)
- if (cur == routes[i].from_comp && next == routes[i].to_comp) {
- reg = readl_relaxed(mmsys->regs + routes[i].addr);
- reg &= ~routes[i].mask;
- reg |= routes[i].val;
- writel_relaxed(reg, mmsys->regs + routes[i].addr);
- }
+ if (cur == routes[i].from_comp && next == routes[i].to_comp)
+ mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask,
+ routes[i].val, NULL);
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
@@ -118,26 +170,51 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
{
struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
const struct mtk_mmsys_routes *routes = mmsys->data->routes;
- u32 reg;
int i;
for (i = 0; i < mmsys->data->num_routes; i++)
- if (cur == routes[i].from_comp && next == routes[i].to_comp) {
- reg = readl_relaxed(mmsys->regs + routes[i].addr);
- reg &= ~routes[i].mask;
- writel_relaxed(reg, mmsys->regs + routes[i].addr);
- }
+ if (cur == routes[i].from_comp && next == routes[i].to_comp)
+ mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask, 0, NULL);
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
-static void mtk_mmsys_update_bits(struct mtk_mmsys *mmsys, u32 offset, u32 mask, u32 val)
+void mtk_mmsys_merge_async_config(struct device *dev, int idx, int width, int height,
+ struct cmdq_pkt *cmdq_pkt)
{
- u32 tmp;
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_MERGE0_ASYNC_CFG_WD + 0x10 * idx,
+ ~0, height << 16 | width, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_merge_async_config);
- tmp = readl_relaxed(mmsys->regs + offset);
- tmp = (tmp & ~mask) | val;
- writel_relaxed(tmp, mmsys->regs + offset);
+void mtk_mmsys_hdr_config(struct device *dev, int be_width, int be_height,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_HDRBE_ASYNC_CFG_WD, ~0,
+ be_height << 16 | be_width, cmdq_pkt);
}
+EXPORT_SYMBOL_GPL(mtk_mmsys_hdr_config);
+
+void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 alpha,
+ u8 mode, u32 biwidth, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
+
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_ALPHA + (idx - 1) * 4, ~0,
+ alpha << 16 | alpha, cmdq_pkt);
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(19 + idx),
+ alpha_sel << (19 + idx), cmdq_pkt);
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4,
+ GENMASK(31, 16) | GENMASK(1, 0), biwidth << 16 | mode, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_mixer_in_config);
+
+void mtk_mmsys_mixer_in_channel_swap(struct device *dev, int idx, bool channel_swap,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4,
+ BIT(4), channel_swap << 4, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_mixer_in_channel_swap);
void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val)
{
@@ -146,20 +223,20 @@ void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val)
switch (val) {
case MTK_DPI_RGB888_SDR_CON:
mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
- MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_SDR_CON);
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_SDR_CON, NULL);
break;
case MTK_DPI_RGB565_SDR_CON:
mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
- MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_SDR_CON);
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_SDR_CON, NULL);
break;
case MTK_DPI_RGB565_DDR_CON:
mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
- MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_DDR_CON);
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_DDR_CON, NULL);
break;
case MTK_DPI_RGB888_DDR_CON:
default:
mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
- MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_DDR_CON);
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_DDR_CON, NULL);
break;
}
}
@@ -170,18 +247,19 @@ static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned l
{
struct mtk_mmsys *mmsys = container_of(rcdev, struct mtk_mmsys, rcdev);
unsigned long flags;
+ u32 offset;
u32 reg;
- spin_lock_irqsave(&mmsys->lock, flags);
+ offset = (id / MMSYS_SW_RESET_PER_REG) * sizeof(u32);
+ id = id % MMSYS_SW_RESET_PER_REG;
+ reg = mmsys->data->sw0_rst_offset + offset;
- reg = readl_relaxed(mmsys->regs + mmsys->data->sw0_rst_offset);
+ spin_lock_irqsave(&mmsys->lock, flags);
if (assert)
- reg &= ~BIT(id);
+ mtk_mmsys_update_bits(mmsys, reg, BIT(id), 0, NULL);
else
- reg |= BIT(id);
-
- writel_relaxed(reg, mmsys->regs + mmsys->data->sw0_rst_offset);
+ mtk_mmsys_update_bits(mmsys, reg, BIT(id), BIT(id), NULL);
spin_unlock_irqrestore(&mmsys->lock, flags);
@@ -236,19 +314,28 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
return ret;
}
- spin_lock_init(&mmsys->lock);
+ mmsys->data = of_device_get_match_data(&pdev->dev);
- mmsys->rcdev.owner = THIS_MODULE;
- mmsys->rcdev.nr_resets = 32;
- mmsys->rcdev.ops = &mtk_mmsys_reset_ops;
- mmsys->rcdev.of_node = pdev->dev.of_node;
- ret = devm_reset_controller_register(&pdev->dev, &mmsys->rcdev);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't register mmsys reset controller: %d\n", ret);
- return ret;
+ if (mmsys->data->num_resets > 0) {
+ spin_lock_init(&mmsys->lock);
+
+ mmsys->rcdev.owner = THIS_MODULE;
+ mmsys->rcdev.nr_resets = mmsys->data->num_resets;
+ mmsys->rcdev.ops = &mtk_mmsys_reset_ops;
+ mmsys->rcdev.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &mmsys->rcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register mmsys reset controller: %d\n", ret);
+ return ret;
+ }
}
- mmsys->data = of_device_get_match_data(&pdev->dev);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &mmsys->cmdq_base, 0);
+ if (ret)
+ dev_dbg(dev, "No mediatek,gce-client-reg!\n");
+#endif
+
platform_set_drvdata(pdev, mmsys);
clks = platform_device_register_data(&pdev->dev, mmsys->data->clk_driver,
@@ -256,6 +343,9 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
if (IS_ERR(clks))
return PTR_ERR(clks);
+ if (mmsys->data->is_vppsys)
+ goto out_probe_done;
+
drm = platform_device_register_data(&pdev->dev, "mediatek-drm",
PLATFORM_DEVID_AUTO, NULL, 0);
if (IS_ERR(drm)) {
@@ -263,6 +353,7 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
return PTR_ERR(drm);
}
+out_probe_done:
return 0;
}
@@ -300,6 +391,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
.data = &mt8186_mmsys_driver_data,
},
{
+ .compatible = "mediatek,mt8188-vdosys0",
+ .data = &mt8188_vdosys0_driver_data,
+ },
+ {
.compatible = "mediatek,mt8192-mmsys",
.data = &mt8192_mmsys_driver_data,
},
@@ -312,6 +407,18 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
.data = &mt8195_vdosys0_driver_data,
},
{
+ .compatible = "mediatek,mt8195-vdosys1",
+ .data = &mt8195_vdosys1_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8195-vppsys0",
+ .data = &mt8195_vppsys0_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8195-vppsys1",
+ .data = &mt8195_vppsys1_driver_data,
+ },
+ {
.compatible = "mediatek,mt8365-mmsys",
.data = &mt8365_mmsys_driver_data,
},
@@ -326,4 +433,19 @@ static struct platform_driver mtk_mmsys_drv = {
.probe = mtk_mmsys_probe,
};
-builtin_platform_driver(mtk_mmsys_drv);
+static int __init mtk_mmsys_init(void)
+{
+ return platform_driver_register(&mtk_mmsys_drv);
+}
+
+static void __exit mtk_mmsys_exit(void)
+{
+ platform_driver_unregister(&mtk_mmsys_drv);
+}
+
+module_init(mtk_mmsys_init);
+module_exit(mtk_mmsys_exit);
+
+MODULE_AUTHOR("Yongqiang Niu <yongqiang.niu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SoC MMSYS driver");
+MODULE_LICENSE("GPL");
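A minimal standalone sketch, not kernel code, of the index decomposition mtk_mmsys_reset_update() now performs so that a controller advertising num_resets = 64 (mt8195 vdosys1) can reach the second SW0_RST_B word; the reset index 40 is a hypothetical example.

#include <stdio.h>

#define MMSYS_SW_RESET_PER_REG 32
#define MT8195_VDO1_SW0_RST_B  0x1d0   /* sw0_rst_offset for mt8195 vdosys1 */

int main(void)
{
	unsigned long id = 40;                                   /* hypothetical reset line */
	unsigned int offset = (id / MMSYS_SW_RESET_PER_REG) * 4; /* one 32-bit register per 32 ids */
	unsigned int reg = MT8195_VDO1_SW0_RST_B + offset;       /* 0x1d4 */
	unsigned int bit = id % MMSYS_SW_RESET_PER_REG;          /* 8 */

	printf("reset %lu -> reg 0x%x, bit %u\n", id, reg, bit);
	return 0;
}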
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index 77f37f8c715b..56f8cc3a97b7 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -91,6 +91,8 @@ struct mtk_mmsys_driver_data {
const struct mtk_mmsys_routes *routes;
const unsigned int num_routes;
const u16 sw0_rst_offset;
+ const u32 num_resets;
+ const bool is_vppsys;
};
/*
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index c1a33d52038e..c5b1b42303ac 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -116,6 +116,21 @@
#define MT8173_MUTEX_MOD_DISP_PWM1 24
#define MT8173_MUTEX_MOD_DISP_OD 25
+#define MT8188_MUTEX_MOD_DISP_OVL0 0
+#define MT8188_MUTEX_MOD_DISP_WDMA0 1
+#define MT8188_MUTEX_MOD_DISP_RDMA0 2
+#define MT8188_MUTEX_MOD_DISP_COLOR0 3
+#define MT8188_MUTEX_MOD_DISP_CCORR0 4
+#define MT8188_MUTEX_MOD_DISP_AAL0 5
+#define MT8188_MUTEX_MOD_DISP_GAMMA0 6
+#define MT8188_MUTEX_MOD_DISP_DITHER0 7
+#define MT8188_MUTEX_MOD_DISP_DSI0 8
+#define MT8188_MUTEX_MOD_DISP_DSC_WRAP0_CORE0 9
+#define MT8188_MUTEX_MOD_DISP_VPP_MERGE 20
+#define MT8188_MUTEX_MOD_DISP_DP_INTF0 21
+#define MT8188_MUTEX_MOD_DISP_POSTMASK0 24
+#define MT8188_MUTEX_MOD2_DISP_PWM0 33
+
#define MT8195_MUTEX_MOD_DISP_OVL0 0
#define MT8195_MUTEX_MOD_DISP_WDMA0 1
#define MT8195_MUTEX_MOD_DISP_RDMA0 2
@@ -130,6 +145,24 @@
#define MT8195_MUTEX_MOD_DISP_DP_INTF0 21
#define MT8195_MUTEX_MOD_DISP_PWM0 27
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA0 0
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA1 1
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA2 2
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA3 3
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA4 4
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA5 5
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA6 6
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA7 7
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE0 8
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE1 9
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE2 10
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE3 11
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE4 12
+#define MT8195_MUTEX_MOD_DISP1_DISP_MIXER 18
+#define MT8195_MUTEX_MOD_DISP1_DPI0 25
+#define MT8195_MUTEX_MOD_DISP1_DPI1 26
+#define MT8195_MUTEX_MOD_DISP1_DP_INTF0 27
+
#define MT8365_MUTEX_MOD_DISP_OVL0 7
#define MT8365_MUTEX_MOD_DISP_OVL0_2L 8
#define MT8365_MUTEX_MOD_DISP_RDMA0 9
@@ -180,6 +213,8 @@
#define MT8167_MUTEX_SOF_DPI1 3
#define MT8183_MUTEX_SOF_DSI0 1
#define MT8183_MUTEX_SOF_DPI0 2
+#define MT8188_MUTEX_SOF_DSI0 1
+#define MT8188_MUTEX_SOF_DP_INTF0 3
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@@ -189,6 +224,8 @@
#define MT8183_MUTEX_EOF_DSI0 (MT8183_MUTEX_SOF_DSI0 << 6)
#define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6)
+#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
+#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@@ -344,6 +381,23 @@ static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0,
};
+static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_OVL0] = MT8188_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_WDMA0] = MT8188_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_RDMA0] = MT8188_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_COLOR0] = MT8188_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_CCORR] = MT8188_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_AAL0] = MT8188_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_GAMMA] = MT8188_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_POSTMASK0] = MT8188_MUTEX_MOD_DISP_POSTMASK0,
+ [DDP_COMPONENT_DITHER0] = MT8188_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_MERGE0] = MT8188_MUTEX_MOD_DISP_VPP_MERGE,
+ [DDP_COMPONENT_DSC0] = MT8188_MUTEX_MOD_DISP_DSC_WRAP0_CORE0,
+ [DDP_COMPONENT_DSI0] = MT8188_MUTEX_MOD_DISP_DSI0,
+ [DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
+ [DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
+};
+
static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
@@ -372,6 +426,21 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_DSI0] = MT8195_MUTEX_MOD_DISP_DSI0,
[DDP_COMPONENT_PWM0] = MT8195_MUTEX_MOD_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0,
+ [DDP_COMPONENT_MDP_RDMA0] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA0,
+ [DDP_COMPONENT_MDP_RDMA1] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA1,
+ [DDP_COMPONENT_MDP_RDMA2] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA2,
+ [DDP_COMPONENT_MDP_RDMA3] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA3,
+ [DDP_COMPONENT_MDP_RDMA4] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA4,
+ [DDP_COMPONENT_MDP_RDMA5] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA5,
+ [DDP_COMPONENT_MDP_RDMA6] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA6,
+ [DDP_COMPONENT_MDP_RDMA7] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA7,
+ [DDP_COMPONENT_MERGE1] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE0,
+ [DDP_COMPONENT_MERGE2] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE1,
+ [DDP_COMPONENT_MERGE3] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE2,
+ [DDP_COMPONENT_MERGE4] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE3,
+ [DDP_COMPONENT_ETHDR_MIXER] = MT8195_MUTEX_MOD_DISP1_DISP_MIXER,
+ [DDP_COMPONENT_MERGE5] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE4,
+ [DDP_COMPONENT_DP_INTF1] = MT8195_MUTEX_MOD_DISP1_DP_INTF0,
};
static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -435,6 +504,14 @@ static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
* but also detect the error at end of frame(EAEOF) when EOF signal
* arrives.
*/
+static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] =
+ MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DP_INTF0] =
+ MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
+};
+
static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MT8195_MUTEX_SOF_DSI0 | MT8195_MUTEX_EOF_DSI0,
@@ -505,6 +582,13 @@ static const struct mtk_mutex_data mt8186_mutex_driver_data = {
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt8188_mutex_driver_data = {
+ .mutex_mod = mt8188_mutex_mod,
+ .mutex_sof = mt8188_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
static const struct mtk_mutex_data mt8192_mutex_driver_data = {
.mutex_mod = mt8192_mutex_mod,
.mutex_sof = mt8183_mutex_sof,
@@ -602,6 +686,9 @@ void mtk_mutex_add_comp(struct mtk_mutex *mutex,
case DDP_COMPONENT_DP_INTF0:
sof_id = MUTEX_SOF_DP_INTF0;
break;
+ case DDP_COMPONENT_DP_INTF1:
+ sof_id = MUTEX_SOF_DP_INTF1;
+ break;
default:
if (mtx->data->mutex_mod[id] < 32) {
offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
@@ -642,6 +729,7 @@ void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
case DDP_COMPONENT_DP_INTF0:
+ case DDP_COMPONENT_DP_INTF1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
mtx->regs +
DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg,
@@ -832,11 +920,6 @@ static int mtk_mutex_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_mutex_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id mutex_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-mutex",
.data = &mt2701_mutex_driver_data},
@@ -854,6 +937,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8186_mutex_driver_data},
{ .compatible = "mediatek,mt8186-mdp3-mutex",
.data = &mt8186_mdp_mutex_driver_data},
+ { .compatible = "mediatek,mt8188-disp-mutex",
+ .data = &mt8188_mutex_driver_data},
{ .compatible = "mediatek,mt8192-disp-mutex",
.data = &mt8192_mutex_driver_data},
{ .compatible = "mediatek,mt8195-disp-mutex",
@@ -866,7 +951,6 @@ MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
static struct platform_driver mtk_mutex_driver = {
.probe = mtk_mutex_probe,
- .remove = mtk_mutex_remove,
.driver = {
.name = "mediatek-mutex",
.owner = THIS_MODULE,
@@ -874,4 +958,19 @@ static struct platform_driver mtk_mutex_driver = {
},
};
-builtin_platform_driver(mtk_mutex_driver);
+static int __init mtk_mutex_init(void)
+{
+ return platform_driver_register(&mtk_mutex_driver);
+}
+
+static void __exit mtk_mutex_exit(void)
+{
+ platform_driver_unregister(&mtk_mutex_driver);
+}
+
+module_init(mtk_mutex_init);
+module_exit(mtk_mutex_exit);
+
+MODULE_AUTHOR("Yongqiang Niu <yongqiang.niu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SoC MUTEX driver");
+MODULE_LICENSE("GPL");
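The mt8188 SOF table above combines each start-of-frame selector with its end-of-frame counterpart (the SOF value shifted left by 7); a short standalone sketch of the resulting words, reusing the definitions added in this patch:

#include <stdio.h>

#define MT8188_MUTEX_SOF_DSI0      1
#define MT8188_MUTEX_EOF_DSI0      (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_SOF_DP_INTF0  3
#define MT8188_MUTEX_EOF_DP_INTF0  (MT8188_MUTEX_SOF_DP_INTF0 << 7)

int main(void)
{
	printf("DSI0:     0x%02x\n", MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0);         /* 0x81 */
	printf("DP_INTF0: 0x%02x\n", MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0); /* 0x183 */
	return 0;
}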
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index 474b272f9b02..354249cc1b12 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -21,6 +21,7 @@
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
#include "mt8186-pm-domains.h"
+#include "mt8188-pm-domains.h"
#include "mt8192-pm-domains.h"
#include "mt8195-pm-domains.h"
@@ -218,6 +219,10 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
if (ret)
goto err_reg;
+ if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
+ regmap_clear_bits(scpsys->base, pd->data->ext_buck_iso_offs,
+ pd->data->ext_buck_iso_mask);
+
/* subsys power on */
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
@@ -272,6 +277,10 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
if (ret < 0)
return ret;
+ if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
+ regmap_set_bits(scpsys->base, pd->data->ext_buck_iso_offs,
+ pd->data->ext_buck_iso_mask);
+
clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
/* subsys power off */
@@ -580,6 +589,10 @@ static const struct of_device_id scpsys_of_match[] = {
.data = &mt8186_scpsys_data,
},
{
+ .compatible = "mediatek,mt8188-power-controller",
+ .data = &mt8188_scpsys_data,
+ },
+ {
.compatible = "mediatek,mt8192-power-controller",
.data = &mt8192_scpsys_data,
},
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
index 7d3c0c36316c..5ec53ee073c4 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.h
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -10,6 +10,7 @@
#define MTK_SCPD_DOMAIN_SUPPLY BIT(4)
/* can't set MTK_SCPD_KEEP_DEFAULT_OFF at the same time */
#define MTK_SCPD_ALWAYS_ON BIT(5)
+#define MTK_SCPD_EXT_BUCK_ISO BIT(6)
#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
#define SPM_VDE_PWR_CON 0x0210
@@ -81,6 +82,8 @@ struct scpsys_bus_prot_data {
* @ctl_offs: The offset for main power control register.
* @sram_pdn_bits: The mask for sram power control bits.
* @sram_pdn_ack_bits: The mask for sram power control acked bits.
+ * @ext_buck_iso_offs: The offset for external buck isolation.
+ * @ext_buck_iso_mask: The mask for external buck isolation.
* @caps: The flag for active wake-up action.
* @bp_infracfg: bus protection for infracfg subsystem
* @bp_smi: bus protection for smi subsystem
@@ -91,6 +94,8 @@ struct scpsys_domain_data {
int ctl_offs;
u32 sram_pdn_bits;
u32 sram_pdn_ack_bits;
+ int ext_buck_iso_offs;
+ u32 ext_buck_iso_mask;
u8 caps;
const struct scpsys_bus_prot_data bp_infracfg[SPM_MAX_BUS_PROT_DATA];
const struct scpsys_bus_prot_data bp_smi[SPM_MAX_BUS_PROT_DATA];
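A short standalone sketch, not kernel code, of how the new MTK_SCPD_EXT_BUCK_ISO capability is consulted: the isolation bits are toggled only when both the cap and a non-zero ext_buck_iso_offs are set, mirroring the checks added to scpsys_power_on()/scpsys_power_off() above. The offset and mask values below are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                 (1u << (n))
#define MTK_SCPD_EXT_BUCK_ISO  BIT(6)

struct sketch_domain {
	uint32_t caps;
	int ext_buck_iso_offs;
	uint32_t ext_buck_iso_mask;
};

static int needs_ext_buck_iso(const struct sketch_domain *d)
{
	return d->ext_buck_iso_offs && (d->caps & MTK_SCPD_EXT_BUCK_ISO);
}

int main(void)
{
	struct sketch_domain d = { MTK_SCPD_EXT_BUCK_ISO, 0xdcc, BIT(0) };  /* hypothetical offsets */

	printf("toggle ext buck iso: %s\n", needs_ext_buck_iso(&d) ? "yes" : "no");
	return 0;
}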
diff --git a/drivers/soc/mediatek/mtk-regulator-coupler.c b/drivers/soc/mediatek/mtk-regulator-coupler.c
new file mode 100644
index 000000000000..ad2ed42aa697
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-regulator-coupler.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Voltage regulators coupler for MediaTek SoCs
+ *
+ * Copyright (C) 2022 Collabora, Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
+
+#define to_mediatek_coupler(x) container_of(x, struct mediatek_regulator_coupler, coupler)
+
+struct mediatek_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *vsram_rdev;
+};
+
+/*
+ * We currently support only couples of no more than two regulators
+ * and modify the vsram voltage only when the vgpu voltage changes.
+ *
+ * This function is limited to the GPU<->SRAM voltage relationship.
+ */
+static int mediatek_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler);
+ int max_spread = rdev->constraints->max_spread[0];
+ int vsram_min_uV = mrc->vsram_rdev->constraints->min_uV;
+ int vsram_max_uV = mrc->vsram_rdev->constraints->max_uV;
+ int vsram_target_min_uV, vsram_target_max_uV;
+ int min_uV = 0;
+ int max_uV = INT_MAX;
+ int ret;
+
+ /*
+ * If the target device is on, the SRAM voltage cannot be set directly;
+ * it is adjusted only through its coupled (vgpu) supply voltage.
+ *
+ * An exception is made in case the use_count is zero: this means
+ * that this is the first time we power up the SRAM regulator, which
+ * implies that the target device has yet to perform initialization
+ * and setting a voltage at that time is harmless.
+ */
+ if (rdev == mrc->vsram_rdev) {
+ if (rdev->use_count == 0)
+ return regulator_do_balance_voltage(rdev, state, true);
+
+ return -EPERM;
+ }
+
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV == 0) {
+ ret = regulator_get_voltage_rdev(rdev);
+ if (ret < 0)
+ return ret;
+ min_uV = ret;
+ }
+
+ ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If the requested voltage plus max_spread falls below VSRAM's min_uV,
+ * clamp to the minimum allowed voltage on VSRAM, as in this case it
+ * is safe to ignore the max_spread parameter.
+ */
+ vsram_target_min_uV = max(vsram_min_uV, min_uV + max_spread);
+ vsram_target_max_uV = min(vsram_max_uV, vsram_target_min_uV + max_spread);
+
+ /* Make sure we're not out of range */
+ vsram_target_min_uV = min(vsram_target_min_uV, vsram_max_uV);
+
+ pr_debug("Setting voltage %d-%duV on %s (minuV %d)\n",
+ vsram_target_min_uV, vsram_target_max_uV,
+ rdev_get_name(mrc->vsram_rdev), min_uV);
+
+ ret = regulator_set_voltage_rdev(mrc->vsram_rdev, vsram_target_min_uV,
+ vsram_target_max_uV, state);
+ if (ret)
+ return ret;
+
+ /* The sram voltage is now balanced: update the target vreg voltage */
+ return regulator_do_balance_voltage(rdev, state, true);
+}
+
+static int mediatek_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler);
+ const char *rdev_name = rdev_get_name(rdev);
+
+ /*
+ * If we're getting a coupling of more than two regulators here,
+ * this is surely not a GPU<->SRAM couple: in that case, we may
+ * want to use another coupler implementation, if any, or the
+ * generic one; the regulator core will keep walking through the
+ * list of couplers when any .attach_regulator() cb returns 1.
+ */
+ if (rdev->coupling_desc.n_coupled > 2)
+ return 1;
+
+ if (strstr(rdev_name, "sram")) {
+ if (mrc->vsram_rdev)
+ return -EINVAL;
+ mrc->vsram_rdev = rdev;
+ } else if (!strstr(rdev_name, "vgpu") && !strstr(rdev_name, "Vgpu")) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mediatek_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler);
+
+ if (rdev == mrc->vsram_rdev)
+ mrc->vsram_rdev = NULL;
+
+ return 0;
+}
+
+static struct mediatek_regulator_coupler mediatek_coupler = {
+ .coupler = {
+ .attach_regulator = mediatek_regulator_attach,
+ .detach_regulator = mediatek_regulator_detach,
+ .balance_voltage = mediatek_regulator_balance_voltage,
+ },
+};
+
+static int mediatek_regulator_coupler_init(void)
+{
+ if (!of_machine_is_compatible("mediatek,mt8183") &&
+ !of_machine_is_compatible("mediatek,mt8186") &&
+ !of_machine_is_compatible("mediatek,mt8192"))
+ return 0;
+
+ return regulator_coupler_register(&mediatek_coupler.coupler);
+}
+arch_initcall(mediatek_regulator_coupler_init);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("MediaTek Regulator Coupler driver");
+MODULE_LICENSE("GPL");
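
As a reference for how this coupler gets exercised, here is a hedged consumer-side sketch; the "vgpu" supply name, device and voltage are assumptions, not taken from a real GPU driver. The point is that the consumer only touches its own regulator and the core routes the request through the coupler's .balance_voltage().

        #include <linux/device.h>
        #include <linux/regulator/consumer.h>

        /* Illustrative only: request a new vgpu voltage from a hypothetical GPU driver */
        static int example_set_gpu_voltage(struct device *dev, int target_uV)
        {
                struct regulator *vgpu;
                int ret;

                vgpu = devm_regulator_get(dev, "vgpu");     /* assumed supply name */
                if (IS_ERR(vgpu))
                        return PTR_ERR(vgpu);

                /*
                 * The regulator core sees that vgpu is coupled with vsram and calls
                 * mediatek_regulator_balance_voltage(), which moves vsram to a
                 * compatible voltage before balancing vgpu, honouring max_spread.
                 */
                ret = regulator_set_voltage(vgpu, target_uV, target_uV);
                if (ret)
                        dev_err(dev, "failed to set vgpu voltage: %d\n", ret);

                return ret;
        }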
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index 0469c9dfeb04..f26eb2f637d5 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -138,6 +138,7 @@
static DEFINE_SPINLOCK(svs_lock);
+#ifdef CONFIG_DEBUG_FS
#define debug_fops_ro(name) \
static int svs_##name##_debug_open(struct inode *inode, \
struct file *filp) \
@@ -170,6 +171,7 @@ static DEFINE_SPINLOCK(svs_lock);
}
#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops}
+#endif
/**
* enum svsb_phase - svs bank phase enumeration
@@ -311,15 +313,12 @@ static const u32 svs_regs_v2[] = {
/**
* struct svs_platform - svs platform control
- * @name: svs platform name
* @base: svs platform register base
* @dev: svs platform device
* @main_clk: main clock for svs bank
* @pbank: svs bank pointer needing to be protected by spin_lock section
* @banks: svs banks that svs platform supports
* @rst: svs platform reset control
- * @efuse_parsing: svs platform efuse parsing function pointer
- * @probe: svs platform probe function pointer
* @efuse_max: total number of svs efuse
* @tefuse_max: total number of thermal efuse
* @regs: svs platform registers map
@@ -328,15 +327,12 @@ static const u32 svs_regs_v2[] = {
* @tefuse: thermal efuse data received from NVMEM framework
*/
struct svs_platform {
- char *name;
void __iomem *base;
struct device *dev;
struct clk *main_clk;
struct svs_bank *pbank;
struct svs_bank *banks;
struct reset_control *rst;
- bool (*efuse_parsing)(struct svs_platform *svsp);
- int (*probe)(struct svs_platform *svsp);
size_t efuse_max;
size_t tefuse_max;
const u32 *regs;
@@ -628,6 +624,7 @@ unlock_mutex:
return ret;
}
+#ifdef CONFIG_DEBUG_FS
static int svs_dump_debug_show(struct seq_file *m, void *p)
{
struct svs_platform *svsp = (struct svs_platform *)m->private;
@@ -843,6 +840,7 @@ static int svs_create_debug_cmds(struct svs_platform *svsp)
return 0;
}
+#endif /* CONFIG_DEBUG_FS */
static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
{
@@ -1324,7 +1322,7 @@ static int svs_init01(struct svs_platform *svsp)
svsb->pm_runtime_enabled_count++;
}
- ret = pm_runtime_get_sync(svsb->opp_dev);
+ ret = pm_runtime_resume_and_get(svsb->opp_dev);
if (ret < 0) {
dev_err(svsb->dev, "mtcmos on fail: %d\n", ret);
goto svs_init01_resume_cpuidle;
@@ -1461,6 +1459,7 @@ static int svs_init02(struct svs_platform *svsp)
{
struct svs_bank *svsb;
unsigned long flags, time_left;
+ int ret;
u32 idx;
for (idx = 0; idx < svsp->bank_max; idx++) {
@@ -1479,7 +1478,8 @@ static int svs_init02(struct svs_platform *svsp)
msecs_to_jiffies(5000));
if (!time_left) {
dev_err(svsb->dev, "init02 completion timeout\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_of_init02;
}
}
@@ -1497,12 +1497,30 @@ static int svs_init02(struct svs_platform *svsp)
if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
if (svs_sync_bank_volts_from_opp(svsb)) {
dev_err(svsb->dev, "sync volt fail\n");
- return -EPERM;
+ ret = -EPERM;
+ goto out_of_init02;
}
}
}
return 0;
+
+out_of_init02:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_switch_bank(svsp);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_adjust_pm_opp_volts(svsb);
+ }
+
+ return ret;
}
static void svs_mon_mode(struct svs_platform *svsp)
@@ -1594,12 +1612,16 @@ static int svs_resume(struct device *dev)
ret = svs_init02(svsp);
if (ret)
- goto out_of_resume;
+ goto svs_resume_reset_assert;
svs_mon_mode(svsp);
return 0;
+svs_resume_reset_assert:
+ dev_err(svsp->dev, "assert reset: %d\n",
+ reset_control_assert(svsp->rst));
+
out_of_resume:
clk_disable_unprepare(svsp->main_clk);
return ret;
@@ -1899,26 +1921,27 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0);
ts_id = (svsp->tefuse[1] >> 9) & BIT(0);
- o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0);
-
- if (adc_cali_en_t == 1) {
- if (!ts_id)
- o_slope = 0;
-
- if (adc_ge_t < 265 || adc_ge_t > 758 ||
- adc_oe_t < 265 || adc_oe_t > 758 ||
- o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 ||
- o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 ||
- o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 ||
- o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 ||
- o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 ||
- o_vtsabb < -8 || o_vtsabb > 484 ||
- degc_cali < 1 || degc_cali > 63) {
- dev_err(svsp->dev, "bad thermal efuse, no mon mode\n");
- goto remove_mt8183_svsb_mon_mode;
- }
+ if (!ts_id) {
+ o_slope = 1534;
} else {
- dev_err(svsp->dev, "no thermal efuse, no mon mode\n");
+ o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0);
+ if (!o_slope_sign)
+ o_slope = 1534 + o_slope * 10;
+ else
+ o_slope = 1534 - o_slope * 10;
+ }
+
+ if (adc_cali_en_t == 0 ||
+ adc_ge_t < 265 || adc_ge_t > 758 ||
+ adc_oe_t < 265 || adc_oe_t > 758 ||
+ o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 ||
+ o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 ||
+ o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 ||
+ o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 ||
+ o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 ||
+ o_vtsabb < -8 || o_vtsabb > 484 ||
+ degc_cali < 1 || degc_cali > 63) {
+ dev_err(svsp->dev, "bad thermal efuse, no mon mode\n");
goto remove_mt8183_svsb_mon_mode;
}
@@ -1937,11 +1960,7 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain;
temp0 = (10000 * 100000 / gain) * 15 / 18;
-
- if (!o_slope_sign)
- mts = (temp0 * 10) / (1534 + o_slope * 10);
- else
- mts = (temp0 * 10) / (1534 - o_slope * 10);
+ mts = (temp0 * 10) / o_slope;
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
@@ -1968,11 +1987,7 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
temp0 = (degc_cali * 10 / 2);
temp1 = ((10000 * 100000 / 4096 / gain) *
oe + tb_roomt * 10) * 15 / 18;
-
- if (!o_slope_sign)
- temp2 = temp1 * 100 / (1534 + o_slope * 10);
- else
- temp2 = temp1 * 100 / (1534 - o_slope * 10);
+ temp2 = temp1 * 100 / o_slope;
svsb->bts = (temp0 + temp2 - 250) * 4 / 10;
}
@@ -2011,7 +2026,7 @@ static bool svs_is_efuse_data_correct(struct svs_platform *svsp)
svsp->efuse_max /= sizeof(u32);
nvmem_cell_put(cell);
- return svsp->efuse_parsing(svsp);
+ return true;
}
static struct device *svs_get_subsys_device(struct svs_platform *svsp,
@@ -2326,50 +2341,38 @@ static const struct of_device_id svs_of_match[] = {
/* Sentinel */
},
};
+MODULE_DEVICE_TABLE(of, svs_of_match);
-static struct svs_platform *svs_platform_probe(struct platform_device *pdev)
+static int svs_probe(struct platform_device *pdev)
{
struct svs_platform *svsp;
const struct svs_platform_data *svsp_data;
- int ret;
+ int ret, svsp_irq;
svsp_data = of_device_get_match_data(&pdev->dev);
- if (!svsp_data) {
- dev_err(&pdev->dev, "no svs platform data?\n");
- return ERR_PTR(-EPERM);
- }
svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL);
if (!svsp)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
svsp->dev = &pdev->dev;
- svsp->name = svsp_data->name;
svsp->banks = svsp_data->banks;
- svsp->efuse_parsing = svsp_data->efuse_parsing;
- svsp->probe = svsp_data->probe;
svsp->regs = svsp_data->regs;
svsp->bank_max = svsp_data->bank_max;
- ret = svsp->probe(svsp);
+ ret = svsp_data->probe(svsp);
if (ret)
- return ERR_PTR(ret);
-
- return svsp;
-}
-
-static int svs_probe(struct platform_device *pdev)
-{
- struct svs_platform *svsp;
- int svsp_irq, ret;
-
- svsp = svs_platform_probe(pdev);
- if (IS_ERR(svsp))
- return PTR_ERR(svsp);
+ return ret;
if (!svs_is_efuse_data_correct(svsp)) {
dev_notice(svsp->dev, "efuse data isn't correct\n");
ret = -EPERM;
+ goto svs_probe_free_efuse;
+ }
+
+ if (!svsp_data->efuse_parsing(svsp)) {
+ dev_err(svsp->dev, "efuse data parsing failed\n");
+ ret = -EPERM;
goto svs_probe_free_resource;
}
@@ -2385,14 +2388,6 @@ static int svs_probe(struct platform_device *pdev)
goto svs_probe_free_resource;
}
- ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
- IRQF_ONESHOT, svsp->name, svsp);
- if (ret) {
- dev_err(svsp->dev, "register irq(%d) failed: %d\n",
- svsp_irq, ret);
- goto svs_probe_free_resource;
- }
-
svsp->main_clk = devm_clk_get(svsp->dev, "main");
if (IS_ERR(svsp->main_clk)) {
dev_err(svsp->dev, "failed to get clock: %ld\n",
@@ -2414,17 +2409,27 @@ static int svs_probe(struct platform_device *pdev)
goto svs_probe_clk_disable;
}
+ ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
+ IRQF_ONESHOT, svsp_data->name, svsp);
+ if (ret) {
+ dev_err(svsp->dev, "register irq(%d) failed: %d\n",
+ svsp_irq, ret);
+ goto svs_probe_iounmap;
+ }
+
ret = svs_start(svsp);
if (ret) {
dev_err(svsp->dev, "svs start fail: %d\n", ret);
goto svs_probe_iounmap;
}
+#ifdef CONFIG_DEBUG_FS
ret = svs_create_debug_cmds(svsp);
if (ret) {
dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret);
goto svs_probe_iounmap;
}
+#endif
return 0;
@@ -2435,11 +2440,13 @@ svs_probe_clk_disable:
clk_disable_unprepare(svsp->main_clk);
svs_probe_free_resource:
- if (!IS_ERR_OR_NULL(svsp->efuse))
- kfree(svsp->efuse);
if (!IS_ERR_OR_NULL(svsp->tefuse))
kfree(svsp->tefuse);
+svs_probe_free_efuse:
+ if (!IS_ERR_OR_NULL(svsp->efuse))
+ kfree(svsp->efuse);
+
return ret;
}
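
One of the changes above swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(). A short sketch of the error-handling difference the change relies on (the device pointer is arbitrary):

        #include <linux/pm_runtime.h>

        static int example_resume(struct device *dev)
        {
                int ret;

                /* pm_runtime_get_sync() bumps the usage count even when resume fails... */
                ret = pm_runtime_get_sync(dev);
                if (ret < 0) {
                        pm_runtime_put_noidle(dev);     /* ...so the caller must drop it */
                        return ret;
                }
                pm_runtime_put(dev);

                /* ...whereas pm_runtime_resume_and_get() drops the count itself on failure */
                ret = pm_runtime_resume_and_get(dev);
                if (ret < 0)
                        return ret;

                pm_runtime_put(dev);
                return 0;
        }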
diff --git a/drivers/soc/nuvoton/Kconfig b/drivers/soc/nuvoton/Kconfig
new file mode 100644
index 000000000000..df46182088ec
--- /dev/null
+++ b/drivers/soc/nuvoton/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig WPCM450_SOC
+ tristate "Nuvoton WPCM450 SoC driver"
+ default y if ARCH_WPCM450
+ select SOC_BUS
+ help
+ Say Y here to compile the SoC information driver for Nuvoton
+ WPCM450 SoCs.
+
+ This driver provides information such as the SoC model and
+ revision.
diff --git a/drivers/soc/nuvoton/Makefile b/drivers/soc/nuvoton/Makefile
new file mode 100644
index 000000000000..e30317b4e829
--- /dev/null
+++ b/drivers/soc/nuvoton/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_WPCM450_SOC) += wpcm450-soc.o
diff --git a/drivers/soc/nuvoton/wpcm450-soc.c b/drivers/soc/nuvoton/wpcm450-soc.c
new file mode 100644
index 000000000000..c5e0d11c383b
--- /dev/null
+++ b/drivers/soc/nuvoton/wpcm450-soc.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton WPCM450 SoC Identification
+ *
+ * Copyright (C) 2022 Jonathan Neuschäfer
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define GCR_PDID 0
+#define PDID_CHIP(x) ((x) & 0x00ffffff)
+#define CHIP_WPCM450 0x926450
+#define PDID_REV(x) ((x) >> 24)
+
+struct revision {
+ u8 number;
+ const char *name;
+};
+
+static const struct revision revisions[] __initconst = {
+ { 0x00, "Z1" },
+ { 0x03, "Z2" },
+ { 0x04, "Z21" },
+ { 0x08, "A1" },
+ { 0x09, "A2" },
+ { 0x0a, "A3" },
+ {}
+};
+
+static const char * __init get_revision(unsigned int rev)
+{
+ int i;
+
+ for (i = 0; revisions[i].name; i++)
+ if (revisions[i].number == rev)
+ return revisions[i].name;
+ return NULL;
+}
+
+static struct soc_device_attribute *wpcm450_attr;
+static struct soc_device *wpcm450_soc;
+
+static int __init wpcm450_soc_init(void)
+{
+ struct soc_device_attribute *attr;
+ struct soc_device *soc;
+ const char *revision;
+ struct regmap *gcr;
+ u32 pdid;
+ int ret;
+
+ if (!of_machine_is_compatible("nuvoton,wpcm450"))
+ return 0;
+
+ gcr = syscon_regmap_lookup_by_compatible("nuvoton,wpcm450-gcr");
+ if (IS_ERR(gcr))
+ return PTR_ERR(gcr);
+ ret = regmap_read(gcr, GCR_PDID, &pdid);
+ if (ret)
+ return ret;
+
+ if (PDID_CHIP(pdid) != CHIP_WPCM450) {
+ pr_warn("Unknown chip ID in GCR.PDID: 0x%06x\n", PDID_CHIP(pdid));
+ return -ENODEV;
+ }
+
+ revision = get_revision(PDID_REV(pdid));
+ if (!revision) {
+ pr_warn("Unknown chip revision in GCR.PDID: 0x%02x\n", PDID_REV(pdid));
+ return -ENODEV;
+ }
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->family = "Nuvoton NPCM";
+ attr->soc_id = "WPCM450";
+ attr->revision = revision;
+ soc = soc_device_register(attr);
+ if (IS_ERR(soc)) {
+ kfree(attr);
+ pr_warn("Could not register SoC device\n");
+ return PTR_ERR(soc);
+ }
+
+ wpcm450_soc = soc;
+ wpcm450_attr = attr;
+ return 0;
+}
+module_init(wpcm450_soc_init);
+
+static void __exit wpcm450_soc_exit(void)
+{
+ if (wpcm450_soc) {
+ soc_device_unregister(wpcm450_soc);
+ wpcm450_soc = NULL;
+ kfree(wpcm450_attr);
+ }
+}
+module_exit(wpcm450_soc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan Neuschäfer");
+MODULE_DESCRIPTION("Nuvoton WPCM450 SoC Identification driver");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index ae504c43d9e7..a8f283086a21 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -91,11 +91,38 @@ config QCOM_OCMEM
config QCOM_PDR_HELPERS
tristate
select QCOM_QMI_HELPERS
+ depends on NET
+
+config QCOM_PMIC_GLINK
+ tristate "Qualcomm PMIC GLINK driver"
+ depends on RPMSG
+ depends on TYPEC
+ depends on DRM
+ depends on NET
+ depends on OF
+ select AUXILIARY_BUS
+ select QCOM_PDR_HELPERS
+ help
+ The Qualcomm PMIC GLINK driver provides access, over GLINK, to the
+ USB and battery firmware running on one of the coprocessors in
+ several modern Qualcomm platforms.
+
+ Say yes here to support USB-C and battery status on modern Qualcomm
+ platforms.
config QCOM_QMI_HELPERS
tristate
depends on NET
+config QCOM_RAMP_CTRL
+ tristate "Qualcomm Ramp Controller driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ The Ramp Controller is used to program the sequence ID for pulse
+ swallowing, enable sequence and link sequence IDs for the CPU
+ cores on some Qualcomm SoCs.
+ Say Y here to enable support for the ramp controller.
+
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
depends on ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index d66604aff2b0..6e88da899f60 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
+obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o
+obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RAMP_CTRL) += ramp_controller.o
obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
qcom_rpmh-y += rpmh-rsc.o
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 3f11554df2f3..33dd8c315eb7 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -12,7 +12,7 @@
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index c92d26b73e6f..199fe9872035 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
new file mode 100644
index 000000000000..bb3fb57abcc6
--- /dev/null
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/pdr.h>
+#include <linux/soc/qcom/pmic_glink.h>
+
+struct pmic_glink {
+ struct device *dev;
+ struct pdr_handle *pdr;
+
+ struct rpmsg_endpoint *ept;
+
+ struct auxiliary_device altmode_aux;
+ struct auxiliary_device ps_aux;
+ struct auxiliary_device ucsi_aux;
+
+ /* serializing client_state and pdr_state updates */
+ struct mutex state_lock;
+ unsigned int client_state;
+ unsigned int pdr_state;
+
+ /* serializing clients list updates */
+ struct mutex client_lock;
+ struct list_head clients;
+};
+
+static struct pmic_glink *__pmic_glink;
+static DEFINE_MUTEX(__pmic_glink_lock);
+
+struct pmic_glink_client {
+ struct list_head node;
+
+ struct pmic_glink *pg;
+ unsigned int id;
+
+ void (*cb)(const void *data, size_t len, void *priv);
+ void (*pdr_notify)(void *priv, int state);
+ void *priv;
+};
+
+static void _devm_pmic_glink_release_client(struct device *dev, void *res)
+{
+ struct pmic_glink_client *client = (struct pmic_glink_client *)res;
+ struct pmic_glink *pg = client->pg;
+
+ mutex_lock(&pg->client_lock);
+ list_del(&client->node);
+ mutex_unlock(&pg->client_lock);
+}
+
+struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv)
+{
+ struct pmic_glink_client *client;
+ struct pmic_glink *pg = dev_get_drvdata(dev->parent);
+
+ client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->pg = pg;
+ client->id = id;
+ client->cb = cb;
+ client->pdr_notify = pdr;
+ client->priv = priv;
+
+ mutex_lock(&pg->client_lock);
+ list_add(&client->node, &pg->clients);
+ mutex_unlock(&pg->client_lock);
+
+ devres_add(dev, client);
+
+ return client;
+}
+EXPORT_SYMBOL_GPL(devm_pmic_glink_register_client);
+
+int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
+{
+ struct pmic_glink *pg = client->pg;
+
+ return rpmsg_send(pg->ept, data, len);
+}
+EXPORT_SYMBOL_GPL(pmic_glink_send);
+
+static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 addr)
+{
+ struct pmic_glink_client *client;
+ struct pmic_glink_hdr *hdr;
+ struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev);
+
+ if (len < sizeof(*hdr)) {
+ dev_warn(pg->dev, "ignoring truncated message\n");
+ return 0;
+ }
+
+ hdr = data;
+
+ list_for_each_entry(client, &pg->clients, node) {
+ if (client->id == le32_to_cpu(hdr->owner))
+ client->cb(data, len, client->priv);
+ }
+
+ return 0;
+}
+
+static void pmic_glink_aux_release(struct device *dev) {}
+
+static int pmic_glink_add_aux_device(struct pmic_glink *pg,
+ struct auxiliary_device *aux,
+ const char *name)
+{
+ struct device *parent = pg->dev;
+ int ret;
+
+ aux->name = name;
+ aux->dev.parent = parent;
+ aux->dev.release = pmic_glink_aux_release;
+ device_set_of_node_from_dev(&aux->dev, parent);
+ ret = auxiliary_device_init(aux);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(aux);
+ if (ret)
+ auxiliary_device_uninit(aux);
+
+ return ret;
+}
+
+static void pmic_glink_del_aux_device(struct pmic_glink *pg,
+ struct auxiliary_device *aux)
+{
+ auxiliary_device_delete(aux);
+ auxiliary_device_uninit(aux);
+}
+
+static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
+{
+ struct pmic_glink_client *client;
+ unsigned int new_state = pg->client_state;
+
+ if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
+ if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
+ new_state = SERVREG_SERVICE_STATE_UP;
+ } else {
+ if (pg->pdr_state != SERVREG_SERVICE_STATE_UP || !pg->ept)
+ new_state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ if (new_state != pg->client_state) {
+ list_for_each_entry(client, &pg->clients, node)
+ client->pdr_notify(client->priv, new_state);
+ pg->client_state = new_state;
+ }
+}
+
+static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
+{
+ struct pmic_glink *pg = priv;
+
+ mutex_lock(&pg->state_lock);
+ pg->pdr_state = state;
+
+ pmic_glink_state_notify_clients(pg);
+ mutex_unlock(&pg->state_lock);
+}
+
+static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct pmic_glink *pg = __pmic_glink;
+ int ret = 0;
+
+ mutex_lock(&__pmic_glink_lock);
+ if (!pg) {
+ ret = dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
+ goto out_unlock;
+ }
+
+ dev_set_drvdata(&rpdev->dev, pg);
+
+ mutex_lock(&pg->state_lock);
+ pg->ept = rpdev->ept;
+ pmic_glink_state_notify_clients(pg);
+ mutex_unlock(&pg->state_lock);
+
+out_unlock:
+ mutex_unlock(&__pmic_glink_lock);
+ return ret;
+}
+
+static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct pmic_glink *pg;
+
+ mutex_lock(&__pmic_glink_lock);
+ pg = __pmic_glink;
+ if (!pg)
+ goto out_unlock;
+
+ mutex_lock(&pg->state_lock);
+ pg->ept = NULL;
+ pmic_glink_state_notify_clients(pg);
+ mutex_unlock(&pg->state_lock);
+out_unlock:
+ mutex_unlock(&__pmic_glink_lock);
+}
+
+static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
+ { "PMIC_RTR_ADSP_APPS" },
+ {}
+};
+
+static struct rpmsg_driver pmic_glink_rpmsg_driver = {
+ .probe = pmic_glink_rpmsg_probe,
+ .remove = pmic_glink_rpmsg_remove,
+ .callback = pmic_glink_rpmsg_callback,
+ .id_table = pmic_glink_rpmsg_id_match,
+ .drv = {
+ .name = "qcom_pmic_glink_rpmsg",
+ },
+};
+
+static int pmic_glink_probe(struct platform_device *pdev)
+{
+ struct pdr_service *service;
+ struct pmic_glink *pg;
+ int ret;
+
+ pg = devm_kzalloc(&pdev->dev, sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, pg);
+
+ pg->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&pg->clients);
+ mutex_init(&pg->client_lock);
+ mutex_init(&pg->state_lock);
+
+ ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
+ if (ret)
+ return ret;
+ ret = pmic_glink_add_aux_device(pg, &pg->ps_aux, "power-supply");
+ if (ret)
+ goto out_release_altmode_aux;
+
+ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+ if (IS_ERR(pg->pdr)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
+ goto out_release_aux_devices;
+ }
+
+ service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
+ if (IS_ERR(service)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
+ "failed adding pdr lookup for charger_pd\n");
+ goto out_release_pdr_handle;
+ }
+
+ mutex_lock(&__pmic_glink_lock);
+ __pmic_glink = pg;
+ mutex_unlock(&__pmic_glink_lock);
+
+ return 0;
+
+out_release_pdr_handle:
+ pdr_handle_release(pg->pdr);
+out_release_aux_devices:
+ pmic_glink_del_aux_device(pg, &pg->ps_aux);
+out_release_altmode_aux:
+ pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+
+ return ret;
+}
+
+static int pmic_glink_remove(struct platform_device *pdev)
+{
+ struct pmic_glink *pg = dev_get_drvdata(&pdev->dev);
+
+ pdr_handle_release(pg->pdr);
+
+ pmic_glink_del_aux_device(pg, &pg->ps_aux);
+ pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+
+ mutex_lock(&__pmic_glink_lock);
+ __pmic_glink = NULL;
+ mutex_unlock(&__pmic_glink_lock);
+
+ return 0;
+}
+
+static const struct of_device_id pmic_glink_of_match[] = {
+ { .compatible = "qcom,pmic-glink", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pmic_glink_of_match);
+
+static struct platform_driver pmic_glink_driver = {
+ .probe = pmic_glink_probe,
+ .remove = pmic_glink_remove,
+ .driver = {
+ .name = "qcom_pmic_glink",
+ .of_match_table = pmic_glink_of_match,
+ },
+};
+
+static int pmic_glink_init(void)
+{
+ platform_driver_register(&pmic_glink_driver);
+ register_rpmsg_driver(&pmic_glink_rpmsg_driver);
+
+ return 0;
+}
+module_init(pmic_glink_init);
+
+static void pmic_glink_exit(void)
+{
+ unregister_rpmsg_driver(&pmic_glink_rpmsg_driver);
+ platform_driver_unregister(&pmic_glink_driver);
+};
+module_exit(pmic_glink_exit);
+
+MODULE_DESCRIPTION("Qualcomm PMIC GLINK driver");
+MODULE_LICENSE("GPL");
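
For orientation, a hedged sketch of the client side of this interface, along the lines of what the altmode and power-supply auxiliary drivers do. The opcode and message layout are made up; the owner id and PMIC_GLINK_REQ_RESP come from the pmic_glink header, and the device passed in is assumed to be one of the auxiliary devices created above (the parent's drvdata is the pmic_glink instance).

        #include <linux/device.h>
        #include <linux/soc/qcom/pmic_glink.h>

        struct example_req {
                struct pmic_glink_hdr hdr;
                __le32 arg;                     /* made-up payload */
        };

        static void example_cb(const void *data, size_t len, void *priv)
        {
                /* messages whose header owner matches our id land here */
        }

        static void example_pdr_cb(void *priv, int state)
        {
                /* SERVREG_SERVICE_STATE_UP / _DOWN transitions of the remote service */
        }

        static int example_attach(struct device *dev)
        {
                struct pmic_glink_client *client;
                struct example_req req = {
                        .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_USBC_PAN),
                        .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
                        .hdr.opcode = cpu_to_le32(0x42),        /* hypothetical opcode */
                        .arg = cpu_to_le32(1),
                };

                client = devm_pmic_glink_register_client(dev, PMIC_GLINK_OWNER_USBC_PAN,
                                                         example_cb, example_pdr_cb, NULL);
                if (IS_ERR(client))
                        return PTR_ERR(client);

                return pmic_glink_send(client, &req, sizeof(req));
        }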
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
new file mode 100644
index 000000000000..4d7895bdeaf2
--- /dev/null
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/soc/qcom/pdr.h>
+#include <drm/drm_bridge.h>
+
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#include <linux/soc/qcom/pmic_glink.h>
+
+#define PMIC_GLINK_MAX_PORTS 2
+
+#define USBC_SC8180X_NOTIFY_IND 0x13
+#define USBC_CMD_WRITE_REQ 0x15
+#define USBC_NOTIFY_IND 0x16
+
+#define ALTMODE_PAN_EN 0x10
+#define ALTMODE_PAN_ACK 0x11
+
+struct usbc_write_req {
+ struct pmic_glink_hdr hdr;
+ __le32 cmd;
+ __le32 arg;
+ __le32 reserved;
+};
+
+#define NOTIFY_PAYLOAD_SIZE 16
+struct usbc_notify {
+ struct pmic_glink_hdr hdr;
+ char payload[NOTIFY_PAYLOAD_SIZE];
+ u32 reserved;
+};
+
+struct usbc_sc8180x_notify {
+ struct pmic_glink_hdr hdr;
+ __le32 notification;
+ __le32 reserved[2];
+};
+
+enum pmic_glink_altmode_pin_assignment {
+ DPAM_HPD_OUT,
+ DPAM_HPD_A,
+ DPAM_HPD_B,
+ DPAM_HPD_C,
+ DPAM_HPD_D,
+ DPAM_HPD_E,
+ DPAM_HPD_F,
+};
+
+struct pmic_glink_altmode;
+
+#define work_to_altmode_port(w) container_of((w), struct pmic_glink_altmode_port, work)
+
+struct pmic_glink_altmode_port {
+ struct pmic_glink_altmode *altmode;
+ unsigned int index;
+
+ struct typec_switch *typec_switch;
+ struct typec_mux *typec_mux;
+ struct typec_mux_state state;
+ struct typec_altmode dp_alt;
+
+ struct work_struct work;
+
+ struct drm_bridge bridge;
+
+ enum typec_orientation orientation;
+ u16 svid;
+ u8 dp_data;
+ u8 mode;
+ u8 hpd_state;
+ u8 hpd_irq;
+};
+
+#define work_to_altmode(w) container_of((w), struct pmic_glink_altmode, enable_work)
+
+struct pmic_glink_altmode {
+ struct device *dev;
+
+ unsigned int owner_id;
+
+ /* To synchronize WRITE_REQ acks */
+ struct mutex lock;
+
+ struct completion pan_ack;
+ struct pmic_glink_client *client;
+
+ struct work_struct enable_work;
+
+ struct pmic_glink_altmode_port ports[PMIC_GLINK_MAX_PORTS];
+};
+
+static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cmd, u32 arg)
+{
+ struct usbc_write_req req = {};
+ unsigned long left;
+ int ret;
+
+ /*
+ * The USBC_CMD_WRITE_REQ ack doesn't identify the request, so wait for
+ * one ack at a time.
+ */
+ mutex_lock(&altmode->lock);
+
+ req.hdr.owner = cpu_to_le32(altmode->owner_id);
+ req.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP);
+ req.hdr.opcode = cpu_to_le32(USBC_CMD_WRITE_REQ);
+ req.cmd = cpu_to_le32(cmd);
+ req.arg = cpu_to_le32(arg);
+
+ ret = pmic_glink_send(altmode->client, &req, sizeof(req));
+ if (ret) {
+ dev_err(altmode->dev, "failed to send altmode request: %#x (%d)\n", cmd, ret);
+ goto out_unlock;
+ }
+
+ left = wait_for_completion_timeout(&altmode->pan_ack, 5 * HZ);
+ if (!left) {
+ dev_err(altmode->dev, "timeout waiting for altmode request ack for: %#x\n", cmd);
+ ret = -ETIMEDOUT;
+ }
+
+out_unlock:
+ mutex_unlock(&altmode->lock);
+ return ret;
+}
+
+static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
+ struct pmic_glink_altmode_port *port,
+ u8 mode, bool hpd_state,
+ bool hpd_irq)
+{
+ struct typec_displayport_data dp_data = {};
+ int ret;
+
+ dp_data.status = DP_STATUS_ENABLED;
+ if (hpd_state)
+ dp_data.status |= DP_STATUS_HPD_STATE;
+ if (hpd_irq)
+ dp_data.status |= DP_STATUS_IRQ_HPD;
+ dp_data.conf = DP_CONF_SET_PIN_ASSIGN(mode);
+
+ port->state.alt = &port->dp_alt;
+ port->state.data = &dp_data;
+ port->state.mode = TYPEC_MODAL_STATE(mode);
+
+ ret = typec_mux_set(port->typec_mux, &port->state);
+ if (ret)
+ dev_err(altmode->dev, "failed to switch mux to DP\n");
+}
+
+static void pmic_glink_altmode_enable_usb(struct pmic_glink_altmode *altmode,
+ struct pmic_glink_altmode_port *port)
+{
+ int ret;
+
+ port->state.alt = NULL;
+ port->state.data = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+
+ ret = typec_mux_set(port->typec_mux, &port->state);
+ if (ret)
+ dev_err(altmode->dev, "failed to switch mux to USB\n");
+}
+
+static void pmic_glink_altmode_worker(struct work_struct *work)
+{
+ struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work);
+ struct pmic_glink_altmode *altmode = alt_port->altmode;
+
+ typec_switch_set(alt_port->typec_switch, alt_port->orientation);
+
+ if (alt_port->svid == USB_TYPEC_DP_SID)
+ pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode,
+ alt_port->hpd_state, alt_port->hpd_irq);
+ else
+ pmic_glink_altmode_enable_usb(altmode, alt_port);
+
+ if (alt_port->hpd_state)
+ drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected);
+ else
+ drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected);
+
+ pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
+}
+
+static enum typec_orientation pmic_glink_altmode_orientation(unsigned int orientation)
+{
+ if (orientation == 0)
+ return TYPEC_ORIENTATION_NORMAL;
+ else if (orientation == 1)
+ return TYPEC_ORIENTATION_REVERSE;
+ else
+ return TYPEC_ORIENTATION_NONE;
+}
+
+#define SC8180X_PORT_MASK 0x000000ff
+#define SC8180X_ORIENTATION_MASK 0x0000ff00
+#define SC8180X_MUX_MASK 0x00ff0000
+#define SC8180X_MODE_MASK 0x3f000000
+#define SC8180X_HPD_STATE_MASK 0x40000000
+#define SC8180X_HPD_IRQ_MASK 0x80000000
+
+static void pmic_glink_altmode_sc8180xp_notify(struct pmic_glink_altmode *altmode,
+ const void *data, size_t len)
+{
+ struct pmic_glink_altmode_port *alt_port;
+ const struct usbc_sc8180x_notify *msg;
+ u32 notification;
+ u8 orientation;
+ u8 hpd_state;
+ u8 hpd_irq;
+ u16 svid;
+ u8 port;
+ u8 mode;
+ u8 mux;
+
+ if (len != sizeof(*msg)) {
+ dev_warn(altmode->dev, "invalid length of USBC_NOTIFY indication: %zd\n", len);
+ return;
+ }
+
+ msg = data;
+ notification = le32_to_cpu(msg->notification);
+ port = FIELD_GET(SC8180X_PORT_MASK, notification);
+ orientation = FIELD_GET(SC8180X_ORIENTATION_MASK, notification);
+ mux = FIELD_GET(SC8180X_MUX_MASK, notification);
+ mode = FIELD_GET(SC8180X_MODE_MASK, notification);
+ hpd_state = FIELD_GET(SC8180X_HPD_STATE_MASK, notification);
+ hpd_irq = FIELD_GET(SC8180X_HPD_IRQ_MASK, notification);
+
+ svid = mux == 2 ? USB_TYPEC_DP_SID : 0;
+
+ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+
+ alt_port = &altmode->ports[port];
+ alt_port->orientation = pmic_glink_altmode_orientation(orientation);
+ alt_port->svid = svid;
+ alt_port->mode = mode;
+ alt_port->hpd_state = hpd_state;
+ alt_port->hpd_irq = hpd_irq;
+ schedule_work(&alt_port->work);
+}
+
+#define SC8280XP_DPAM_MASK 0x3f
+#define SC8280XP_HPD_STATE_MASK BIT(6)
+#define SC8280XP_HPD_IRQ_MASK BIT(7)
+
+static void pmic_glink_altmode_sc8280xp_notify(struct pmic_glink_altmode *altmode,
+ u16 svid, const void *data, size_t len)
+{
+ struct pmic_glink_altmode_port *alt_port;
+ const struct usbc_notify *notify;
+ u8 orientation;
+ u8 hpd_state;
+ u8 hpd_irq;
+ u8 mode;
+ u8 port;
+
+ if (len != sizeof(*notify)) {
+ dev_warn(altmode->dev, "invalid length USBC_NOTIFY_IND: %zd\n",
+ len);
+ return;
+ }
+
+ notify = data;
+
+ port = notify->payload[0];
+ orientation = notify->payload[1];
+ mode = FIELD_GET(SC8280XP_DPAM_MASK, notify->payload[8]) - DPAM_HPD_A;
+ hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]);
+ hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]);
+
+ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+
+ alt_port = &altmode->ports[port];
+ alt_port->orientation = pmic_glink_altmode_orientation(orientation);
+ alt_port->svid = svid;
+ alt_port->mode = mode;
+ alt_port->hpd_state = hpd_state;
+ alt_port->hpd_irq = hpd_irq;
+ schedule_work(&alt_port->work);
+}
+
+static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv)
+{
+ struct pmic_glink_altmode *altmode = priv;
+ const struct pmic_glink_hdr *hdr = data;
+ u16 opcode;
+ u16 svid;
+
+ opcode = le32_to_cpu(hdr->opcode) & 0xff;
+ svid = le32_to_cpu(hdr->opcode) >> 16;
+
+ switch (opcode) {
+ case USBC_CMD_WRITE_REQ:
+ complete(&altmode->pan_ack);
+ break;
+ case USBC_NOTIFY_IND:
+ pmic_glink_altmode_sc8280xp_notify(altmode, svid, data, len);
+ break;
+ case USBC_SC8180X_NOTIFY_IND:
+ pmic_glink_altmode_sc8180xp_notify(altmode, data, len);
+ break;
+ }
+}
+
+static int pmic_glink_altmode_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = {
+ .attach = pmic_glink_altmode_attach,
+};
+
+static void pmic_glink_altmode_put_mux(void *data)
+{
+ typec_mux_put(data);
+}
+
+static void pmic_glink_altmode_put_switch(void *data)
+{
+ typec_switch_put(data);
+}
+
+static void pmic_glink_altmode_enable_worker(struct work_struct *work)
+{
+ struct pmic_glink_altmode *altmode = work_to_altmode(work);
+ int ret;
+
+ ret = pmic_glink_altmode_request(altmode, ALTMODE_PAN_EN, 0);
+ if (ret)
+ dev_err(altmode->dev, "failed to request altmode notifications\n");
+}
+
+static void pmic_glink_altmode_pdr_notify(void *priv, int state)
+{
+ struct pmic_glink_altmode *altmode = priv;
+
+ if (state == SERVREG_SERVICE_STATE_UP)
+ schedule_work(&altmode->enable_work);
+}
+
+static const struct of_device_id pmic_glink_altmode_of_quirks[] = {
+ { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)PMIC_GLINK_OWNER_USBC },
+ {}
+};
+
+static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct pmic_glink_altmode_port *alt_port;
+ struct pmic_glink_altmode *altmode;
+ struct typec_altmode_desc mux_desc = {};
+ const struct of_device_id *match;
+ struct fwnode_handle *fwnode;
+ struct device *dev = &adev->dev;
+ u32 port;
+ int ret;
+
+ altmode = devm_kzalloc(dev, sizeof(*altmode), GFP_KERNEL);
+ if (!altmode)
+ return -ENOMEM;
+
+ altmode->dev = dev;
+
+ match = of_match_device(pmic_glink_altmode_of_quirks, dev->parent);
+ if (match)
+ altmode->owner_id = (unsigned long)match->data;
+ else
+ altmode->owner_id = PMIC_GLINK_OWNER_USBC_PAN;
+
+ INIT_WORK(&altmode->enable_work, pmic_glink_altmode_enable_worker);
+ init_completion(&altmode->pan_ack);
+ mutex_init(&altmode->lock);
+
+ device_for_each_child_node(dev, fwnode) {
+ ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ return ret;
+ }
+
+ if (port >= ARRAY_SIZE(altmode->ports)) {
+ dev_warn(dev, "invalid connector number, ignoring\n");
+ continue;
+ }
+
+ if (altmode->ports[port].altmode) {
+ dev_err(dev, "multiple connector definition for port %u\n", port);
+ return -EINVAL;
+ }
+
+ alt_port = &altmode->ports[port];
+ alt_port->altmode = altmode;
+ alt_port->index = port;
+ INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
+
+ alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
+ alt_port->bridge.of_node = to_of_node(fwnode);
+ alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
+ alt_port->bridge.type = DRM_MODE_CONNECTOR_USB;
+
+ ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+ if (ret)
+ return ret;
+
+ alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
+ alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
+ alt_port->dp_alt.active = 1;
+
+ mux_desc.svid = USB_TYPEC_DP_SID;
+ mux_desc.mode = USB_TYPEC_DP_MODE;
+ alt_port->typec_mux = fwnode_typec_mux_get(fwnode, &mux_desc);
+ if (IS_ERR(alt_port->typec_mux))
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_mux),
+ "failed to acquire mode-switch for port: %d\n",
+ port);
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_mux,
+ alt_port->typec_mux);
+ if (ret)
+ return ret;
+
+ alt_port->typec_switch = fwnode_typec_switch_get(fwnode);
+ if (IS_ERR(alt_port->typec_switch))
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_switch),
+ "failed to acquire orientation-switch for port: %d\n",
+ port);
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_switch,
+ alt_port->typec_switch);
+ if (ret)
+ return ret;
+ }
+
+ altmode->client = devm_pmic_glink_register_client(dev,
+ altmode->owner_id,
+ pmic_glink_altmode_callback,
+ pmic_glink_altmode_pdr_notify,
+ altmode);
+ return PTR_ERR_OR_ZERO(altmode->client);
+}
+
+static const struct auxiliary_device_id pmic_glink_altmode_id_table[] = {
+ { .name = "pmic_glink.altmode", },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, pmic_glink_altmode_id_table);
+
+static struct auxiliary_driver pmic_glink_altmode_driver = {
+ .name = "pmic_glink_altmode",
+ .probe = pmic_glink_altmode_probe,
+ .id_table = pmic_glink_altmode_id_table,
+};
+
+module_auxiliary_driver(pmic_glink_altmode_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC GLINK Altmode driver");
+MODULE_LICENSE("GPL");
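
To make the packed SC8180X notification word easier to follow, here is a small decoding sketch using the same masks as above; the "mux == 2 means DisplayPort" interpretation mirrors the code in the driver, and the printed wording is illustrative.

        #include <linux/bitfield.h>
        #include <linux/printk.h>

        /* Illustrative only: unpack one SC8180X-style notification word */
        static void example_decode_notification(u32 notification)
        {
                u8 port = FIELD_GET(SC8180X_PORT_MASK, notification);
                u8 mux = FIELD_GET(SC8180X_MUX_MASK, notification);
                bool hpd_state = FIELD_GET(SC8180X_HPD_STATE_MASK, notification);
                bool hpd_irq = FIELD_GET(SC8180X_HPD_IRQ_MASK, notification);

                pr_info("port %u: %s, hpd=%d, hpd_irq=%d\n", port,
                        mux == 2 ? "DisplayPort alt mode" : "USB", hpd_state, hpd_irq);
        }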
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
index 6228af057120..c207bb96c523 100644
--- a/drivers/soc/qcom/qcom_stats.c
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -92,7 +92,7 @@ static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused)
/* Items are allocated lazily, so lookup pointer each time */
stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL);
if (IS_ERR(stat))
- return -EIO;
+ return 0;
qcom_print_stats(s, stat);
@@ -170,20 +170,14 @@ static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *
static void qcom_create_subsystem_stat_files(struct dentry *root,
const struct stats_config *config)
{
- const struct sleep_stats *stat;
int i;
if (!config->subsystem_stats_in_smem)
return;
- for (i = 0; i < ARRAY_SIZE(subsystems); i++) {
- stat = qcom_smem_get(subsystems[i].pid, subsystems[i].smem_item, NULL);
- if (IS_ERR(stat))
- continue;
-
+ for (i = 0; i < ARRAY_SIZE(subsystems); i++)
debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i],
&qcom_subsystem_sleep_stats_fops);
- }
}
static int qcom_stats_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c
new file mode 100644
index 000000000000..dc74d2a19de2
--- /dev/null
+++ b/drivers/soc/qcom/ramp_controller.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Ramp Controller driver
+ * Copyright (c) 2022, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define RC_UPDATE_EN BIT(0)
+#define RC_ROOT_EN BIT(1)
+
+#define RC_REG_CFG_UPDATE 0x60
+#define RC_CFG_UPDATE_EN BIT(8)
+#define RC_CFG_ACK GENMASK(31, 16)
+
+#define RC_DCVS_CFG_SID 2
+#define RC_LINK_SID 3
+#define RC_LMH_SID 6
+#define RC_DFS_SID 14
+
+#define RC_UPDATE_TIMEOUT_US 500
+
+/**
+ * struct qcom_ramp_controller_desc - SoC specific parameters
+ * @cfg_dfs_sid: Dynamic Frequency Scaling SID configuration
+ * @cfg_link_sid: Link SID configuration
+ * @cfg_lmh_sid: Limits Management hardware SID configuration
+ * @cfg_ramp_en: Ramp Controller enable sequence
+ * @cfg_ramp_dis: Ramp Controller disable sequence
+ * @cmd_reg: Command register offset
+ * @num_dfs_sids: Number of DFS SIDs (max 8)
+ * @num_link_sids: Number of Link SIDs (max 3)
+ * @num_lmh_sids: Number of LMh SIDs (max 8)
+ * @num_ramp_en: Number of entries in enable sequence
+ * @num_ramp_dis: Number of entries in disable sequence
+ */
+struct qcom_ramp_controller_desc {
+ const struct reg_sequence *cfg_dfs_sid;
+ const struct reg_sequence *cfg_link_sid;
+ const struct reg_sequence *cfg_lmh_sid;
+ const struct reg_sequence *cfg_ramp_en;
+ const struct reg_sequence *cfg_ramp_dis;
+ u8 cmd_reg;
+ u8 num_dfs_sids;
+ u8 num_link_sids;
+ u8 num_lmh_sids;
+ u8 num_ramp_en;
+ u8 num_ramp_dis;
+};
+
+/**
+ * struct qcom_ramp_controller - Main driver structure
+ * @regmap: Regmap handle
+ * @desc: SoC specific parameters
+ */
+struct qcom_ramp_controller {
+ struct regmap *regmap;
+ const struct qcom_ramp_controller_desc *desc;
+};
+
+/**
+ * rc_wait_for_update() - Wait for Ramp Controller root update
+ * @qrc: Main driver structure
+ *
+ * Return: Zero for success or negative number for failure
+ */
+static int rc_wait_for_update(struct qcom_ramp_controller *qrc)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ struct regmap *r = qrc->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_set_bits(r, d->cmd_reg, RC_ROOT_EN);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(r, d->cmd_reg, val, !(val & RC_UPDATE_EN),
+ 1, RC_UPDATE_TIMEOUT_US);
+}
+
+/**
+ * rc_set_cfg_update() - Ramp Controller configuration update
+ * @qrc: Main driver structure
+ * @ce: Configuration entry to update
+ *
+ * Return: Zero for success or negative number for failure
+ */
+static int rc_set_cfg_update(struct qcom_ramp_controller *qrc, u8 ce)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ struct regmap *r = qrc->regmap;
+ u32 ack, val;
+ int ret;
+
+ /* The ack bit for this entry is one of bits 16-31 of RC_REG_CFG_UPDATE */
+ ack = FIELD_PREP(RC_CFG_ACK, BIT(ce));
+
+ /* Write the configuration type first... */
+ ret = regmap_set_bits(r, d->cmd_reg + RC_REG_CFG_UPDATE, ce);
+ if (ret)
+ return ret;
+
+ /* ...and after that, enable the update bit to sync the changes */
+ ret = regmap_set_bits(r, d->cmd_reg + RC_REG_CFG_UPDATE, RC_CFG_UPDATE_EN);
+ if (ret)
+ return ret;
+
+ /* Wait for the changes to go through */
+ ret = regmap_read_poll_timeout(r, d->cmd_reg + RC_REG_CFG_UPDATE, val,
+ val & ack, 1, RC_UPDATE_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ /*
+ * Configuration update success! The CFG_UPDATE register will not be
+ * cleared automatically upon applying the configuration, so we have
+ * to do that manually in order to leave the ramp controller in a
+ * predictable and clean state.
+ */
+ ret = regmap_write(r, d->cmd_reg + RC_REG_CFG_UPDATE, 0);
+ if (ret)
+ return ret;
+
+ /* Wait for the update bit cleared ack */
+ return regmap_read_poll_timeout(r, d->cmd_reg + RC_REG_CFG_UPDATE,
+ val, !(val & RC_CFG_ACK), 1,
+ RC_UPDATE_TIMEOUT_US);
+}
+
+/**
+ * rc_write_cfg() - Send configuration sequence
+ * @qrc: Main driver structure
+ * @seq: Register sequence to send before asking for update
+ * @ce: Configuration SID
+ * @nsids: Total number of SIDs
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int rc_write_cfg(struct qcom_ramp_controller *qrc,
+ const struct reg_sequence *seq,
+ u16 ce, u8 nsids)
+{
+ int ret;
+ u8 i;
+
+ /* Make sure the ramp controller is ready to accept a new sequence */
+ ret = rc_wait_for_update(qrc);
+ if (ret)
+ return ret;
+
+ /* Write the sequence */
+ ret = regmap_multi_reg_write(qrc->regmap, seq, nsids);
+ if (ret)
+ return ret;
+
+ /* Pull the trigger: do config update starting from the last sid */
+ for (i = 0; i < nsids; i++) {
+ ret = rc_set_cfg_update(qrc, (u8)ce - i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * rc_ramp_ctrl_enable() - Enable Ramp up/down Control
+ * @qrc: Main driver structure
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int rc_ramp_ctrl_enable(struct qcom_ramp_controller *qrc)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ int i, ret;
+
+ for (i = 0; i < d->num_ramp_en; i++) {
+ ret = rc_write_cfg(qrc, &d->cfg_ramp_en[i], RC_DCVS_CFG_SID, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * qcom_ramp_controller_start() - Initialize and start the ramp controller
+ * @qrc: Main driver structure
+ *
+ * The Ramp Controller needs to be initialized by programming the relevant
+ * registers with SoC-specific configuration: once programming is done,
+ * the hardware will take care of the rest (no further handling required).
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int qcom_ramp_controller_start(struct qcom_ramp_controller *qrc)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ int ret;
+
+ /* Program LMH, DFS, Link SIDs */
+ ret = rc_write_cfg(qrc, d->cfg_lmh_sid, RC_LMH_SID, d->num_lmh_sids);
+ if (ret)
+ return ret;
+
+ ret = rc_write_cfg(qrc, d->cfg_dfs_sid, RC_DFS_SID, d->num_dfs_sids);
+ if (ret)
+ return ret;
+
+ ret = rc_write_cfg(qrc, d->cfg_link_sid, RC_LINK_SID, d->num_link_sids);
+ if (ret)
+ return ret;
+
+ /* Everything is ready! Enable the ramp up/down control */
+ return rc_ramp_ctrl_enable(qrc);
+}
+
+static const struct regmap_config qrc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x68,
+ .fast_io = true,
+};
+
+static const struct reg_sequence msm8976_cfg_dfs_sid[] = {
+ { 0x10, 0xfefebff7 },
+ { 0x14, 0xfdff7fef },
+ { 0x18, 0xfbffdefb },
+ { 0x1c, 0xb69b5555 },
+ { 0x20, 0x24929249 },
+ { 0x24, 0x49241112 },
+ { 0x28, 0x11112111 },
+ { 0x2c, 0x8102 }
+};
+
+static const struct reg_sequence msm8976_cfg_link_sid[] = {
+ { 0x40, 0xfc987 }
+};
+
+static const struct reg_sequence msm8976_cfg_lmh_sid[] = {
+ { 0x30, 0x77706db },
+ { 0x34, 0x5550249 },
+ { 0x38, 0x111 }
+};
+
+static const struct reg_sequence msm8976_cfg_ramp_en[] = {
+ { 0x50, 0x800 }, /* pre_en */
+ { 0x50, 0xc00 }, /* en */
+ { 0x50, 0x400 } /* post_en */
+};
+
+static const struct reg_sequence msm8976_cfg_ramp_dis[] = {
+ { 0x50, 0x0 }
+};
+
+static const struct qcom_ramp_controller_desc msm8976_rc_cfg = {
+ .cfg_dfs_sid = msm8976_cfg_dfs_sid,
+ .num_dfs_sids = ARRAY_SIZE(msm8976_cfg_dfs_sid),
+
+ .cfg_link_sid = msm8976_cfg_link_sid,
+ .num_link_sids = ARRAY_SIZE(msm8976_cfg_link_sid),
+
+ .cfg_lmh_sid = msm8976_cfg_lmh_sid,
+ .num_lmh_sids = ARRAY_SIZE(msm8976_cfg_lmh_sid),
+
+ .cfg_ramp_en = msm8976_cfg_ramp_en,
+ .num_ramp_en = ARRAY_SIZE(msm8976_cfg_ramp_en),
+
+ .cfg_ramp_dis = msm8976_cfg_ramp_dis,
+ .num_ramp_dis = ARRAY_SIZE(msm8976_cfg_ramp_dis),
+
+ .cmd_reg = 0x0,
+};
+
+static int qcom_ramp_controller_probe(struct platform_device *pdev)
+{
+ struct qcom_ramp_controller *qrc;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qrc = devm_kmalloc(&pdev->dev, sizeof(*qrc), GFP_KERNEL);
+ if (!qrc)
+ return -ENOMEM;
+
+ qrc->desc = device_get_match_data(&pdev->dev);
+ if (!qrc->desc)
+ return -EINVAL;
+
+ qrc->regmap = devm_regmap_init_mmio(&pdev->dev, base, &qrc_regmap_config);
+ if (IS_ERR(qrc->regmap))
+ return PTR_ERR(qrc->regmap);
+
+ platform_set_drvdata(pdev, qrc);
+
+ return qcom_ramp_controller_start(qrc);
+}
+
+static int qcom_ramp_controller_remove(struct platform_device *pdev)
+{
+ struct qcom_ramp_controller *qrc = platform_get_drvdata(pdev);
+
+ return rc_write_cfg(qrc, qrc->desc->cfg_ramp_dis,
+ RC_DCVS_CFG_SID, qrc->desc->num_ramp_dis);
+}
+
+static const struct of_device_id qcom_ramp_controller_match_table[] = {
+ { .compatible = "qcom,msm8976-ramp-controller", .data = &msm8976_rc_cfg },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qcom_ramp_controller_match_table);
+
+static struct platform_driver qcom_ramp_controller_driver = {
+ .driver = {
+ .name = "qcom-ramp-controller",
+ .of_match_table = qcom_ramp_controller_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = qcom_ramp_controller_probe,
+ .remove = qcom_ramp_controller_remove,
+};
+
+static int __init qcom_ramp_controller_init(void)
+{
+ return platform_driver_register(&qcom_ramp_controller_driver);
+}
+arch_initcall(qcom_ramp_controller_init);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Qualcomm Ramp Controller driver");
+MODULE_LICENSE("GPL");
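
Support for another SoC would be added as one more descriptor plus an of_device_id entry next to the MSM8976 data above; a hedged skeleton follows, with every offset, value and compatible string invented for the sketch.

        /* Illustrative only: skeleton of an additional SoC descriptor */
        static const struct reg_sequence example_cfg_dfs_sid[] = {
                { 0x10, 0x00000000 },           /* placeholder values */
                { 0x14, 0x00000000 },
        };

        static const struct qcom_ramp_controller_desc example_rc_cfg = {
                .cfg_dfs_sid = example_cfg_dfs_sid,
                .num_dfs_sids = ARRAY_SIZE(example_cfg_dfs_sid),
                /* .cfg_lmh_sid, .cfg_link_sid, .cfg_ramp_en/_dis filled the same way */
                .cmd_reg = 0x0,
        };

        /*
         * ...plus a match table entry:
         * { .compatible = "qcom,example-ramp-controller", .data = &example_rc_cfg },
         */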
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 0feaae357821..2d3ee22b9249 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -14,9 +14,10 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1)
+#define NUM_MAX_VMIDS 2
static dev_t qcom_rmtfs_mem_major;
@@ -171,12 +172,12 @@ static void qcom_rmtfs_mem_release_device(struct device *dev)
static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct qcom_scm_vmperm perms[2];
+ struct qcom_scm_vmperm perms[NUM_MAX_VMIDS + 1];
struct reserved_mem *rmem;
struct qcom_rmtfs_mem *rmtfs_mem;
u32 client_id;
- u32 vmid;
- int ret;
+ u32 vmid[NUM_MAX_VMIDS];
+ int ret, i, num_vmids;
rmem = of_reserved_mem_lookup(node);
if (!rmem) {
@@ -226,7 +227,19 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
goto put_device;
}
- ret = of_property_read_u32(node, "qcom,vmid", &vmid);
+ num_vmids = of_property_count_u32_elems(node, "qcom,vmid");
+ if (num_vmids < 0) {
+ dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", ret);
+ goto remove_cdev;
+ } else if (num_vmids > NUM_MAX_VMIDS) {
+ dev_warn(&pdev->dev,
+ "too many VMIDs (%d) specified! Only mapping first %d entries\n",
+ num_vmids, NUM_MAX_VMIDS);
+ num_vmids = NUM_MAX_VMIDS;
+ }
+
+ ret = of_property_read_u32_array(node, "qcom,vmid", vmid, num_vmids);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
goto remove_cdev;
@@ -238,12 +250,15 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
perms[0].vmid = QCOM_SCM_VMID_HLOS;
perms[0].perm = QCOM_SCM_PERM_RW;
- perms[1].vmid = vmid;
- perms[1].perm = QCOM_SCM_PERM_RW;
+
+ for (i = 0; i < num_vmids; i++) {
+ perms[i + 1].vmid = vmid[i];
+ perms[i + 1].perm = QCOM_SCM_PERM_RW;
+ }
rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS);
ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
- &rmtfs_mem->perms, perms, 2);
+ &rmtfs_mem->perms, perms, num_vmids + 1);
if (ret < 0) {
dev_err(&pdev->dev, "assign memory failed\n");
goto remove_cdev;
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 4c2d2c296790..f20e2a49a669 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -187,6 +187,16 @@ static struct rpmhpd nsp = {
.res_name = "nsp.lvl",
};
+static struct rpmhpd nsp0 = {
+ .pd = { .name = "nsp0", },
+ .res_name = "nsp0.lvl",
+};
+
+static struct rpmhpd nsp1 = {
+ .pd = { .name = "nsp1", },
+ .res_name = "nsp1.lvl",
+};
+
static struct rpmhpd qphy = {
.pd = { .name = "qphy", },
.res_name = "qphy.lvl",
@@ -212,6 +222,29 @@ static const struct rpmhpd_desc sa8540p_desc = {
.num_pds = ARRAY_SIZE(sa8540p_rpmhpds),
};
+/* SA8775P RPMH power domains */
+static struct rpmhpd *sa8775p_rpmhpds[] = {
+ [SA8775P_CX] = &cx,
+ [SA8775P_CX_AO] = &cx_ao,
+ [SA8775P_EBI] = &ebi,
+ [SA8775P_GFX] = &gfx,
+ [SA8775P_LCX] = &lcx,
+ [SA8775P_LMX] = &lmx,
+ [SA8775P_MMCX] = &mmcx,
+ [SA8775P_MMCX_AO] = &mmcx_ao,
+ [SA8775P_MXC] = &mxc,
+ [SA8775P_MXC_AO] = &mxc_ao,
+ [SA8775P_MX] = &mx,
+ [SA8775P_MX_AO] = &mx_ao,
+ [SA8775P_NSP0] = &nsp0,
+ [SA8775P_NSP1] = &nsp1,
+};
+
+static const struct rpmhpd_desc sa8775p_desc = {
+ .rpmhpds = sa8775p_rpmhpds,
+ .num_pds = ARRAY_SIZE(sa8775p_rpmhpds),
+};
+
/* SDM670 RPMH powerdomains */
static struct rpmhpd *sdm670_rpmhpds[] = {
[SDM670_CX] = &cx_w_mx_parent,
@@ -487,6 +520,7 @@ static const struct rpmhpd_desc sc8280xp_desc = {
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,qdu1000-rpmhpd", .data = &qdu1000_desc },
{ .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc },
+ { .compatible = "qcom,sa8775p-rpmhpd", .data = &sa8775p_desc },
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
{ .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index f0db6a10cf4e..337b1ad1cd3b 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -471,23 +471,6 @@ static const struct rpmpd_desc qcm2290_desc = {
.max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
};
-static struct rpmpd *sm4250_rpmpds[] = {
- [SM4250_VDDCX] = &sm6115_vddcx,
- [SM4250_VDDCX_AO] = &sm6115_vddcx_ao,
- [SM4250_VDDCX_VFL] = &sm6115_vddcx_vfl,
- [SM4250_VDDMX] = &sm6115_vddmx,
- [SM4250_VDDMX_AO] = &sm6115_vddmx_ao,
- [SM4250_VDDMX_VFL] = &sm6115_vddmx_vfl,
- [SM4250_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
- [SM4250_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
-};
-
-static const struct rpmpd_desc sm4250_desc = {
- .rpmpds = sm4250_rpmpds,
- .num_pds = ARRAY_SIZE(sm4250_rpmpds),
- .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
-};
-
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc },
@@ -502,7 +485,6 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
- { .compatible = "qcom,sm4250-rpmpd", .data = &sm4250_desc },
{ .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc },
{ .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc },
{ .compatible = "qcom,sm6375-rpmpd", .data = &sm6375_desc },
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 7e3b6a7ea34c..523627d5d398 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -233,6 +233,7 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
{ .compatible = "qcom,rpm-ipq6018" },
+ { .compatible = "qcom,rpm-ipq9574" },
{ .compatible = "qcom,rpm-msm8226" },
{ .compatible = "qcom,rpm-msm8909" },
{ .compatible = "qcom,rpm-msm8916" },
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index ebcbf9b9c18b..e9012ca1a87b 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -169,6 +169,13 @@ struct socinfo {
__le32 ndefective_parts_array_offset;
/* Version 15 */
__le32 nmodem_supported;
+ /* Version 16 */
+ __le32 feature_code;
+ __le32 pcode;
+ __le32 npartnamemap_offset;
+ __le32 nnum_partname_mapping;
+ /* Version 17 */
+ __le32 oem_variant;
};
#ifdef CONFIG_DEBUG_FS
@@ -189,6 +196,9 @@ struct socinfo_params {
u32 num_defective_parts;
u32 ndefective_parts_array_offset;
u32 nmodem_supported;
+ u32 feature_code;
+ u32 pcode;
+ u32 oem_variant;
};
struct smem_image_version {
@@ -214,44 +224,72 @@ struct soc_id {
};
static const struct soc_id soc_id[] = {
+ { qcom_board_id(MSM8260) },
+ { qcom_board_id(MSM8660) },
+ { qcom_board_id(APQ8060) },
{ qcom_board_id(MSM8960) },
{ qcom_board_id(APQ8064) },
+ { qcom_board_id(MSM8930) },
+ { qcom_board_id(MSM8630) },
+ { qcom_board_id(MSM8230) },
+ { qcom_board_id(APQ8030) },
+ { qcom_board_id(MSM8627) },
+ { qcom_board_id(MSM8227) },
{ qcom_board_id(MSM8660A) },
{ qcom_board_id(MSM8260A) },
{ qcom_board_id(APQ8060A) },
{ qcom_board_id(MSM8974) },
+ { qcom_board_id(MSM8225) },
+ { qcom_board_id(MSM8625) },
{ qcom_board_id(MPQ8064) },
{ qcom_board_id(MSM8960AB) },
{ qcom_board_id(APQ8060AB) },
{ qcom_board_id(MSM8260AB) },
{ qcom_board_id(MSM8660AB) },
+ { qcom_board_id(MSM8930AA) },
+ { qcom_board_id(MSM8630AA) },
+ { qcom_board_id(MSM8230AA) },
{ qcom_board_id(MSM8626) },
{ qcom_board_id(MSM8610) },
{ qcom_board_id(APQ8064AB) },
+ { qcom_board_id(MSM8930AB) },
+ { qcom_board_id(MSM8630AB) },
+ { qcom_board_id(MSM8230AB) },
+ { qcom_board_id(APQ8030AB) },
{ qcom_board_id(MSM8226) },
{ qcom_board_id(MSM8526) },
+ { qcom_board_id(APQ8030AA) },
{ qcom_board_id(MSM8110) },
{ qcom_board_id(MSM8210) },
{ qcom_board_id(MSM8810) },
{ qcom_board_id(MSM8212) },
{ qcom_board_id(MSM8612) },
{ qcom_board_id(MSM8112) },
+ { qcom_board_id(MSM8125) },
{ qcom_board_id(MSM8225Q) },
{ qcom_board_id(MSM8625Q) },
{ qcom_board_id(MSM8125Q) },
{ qcom_board_id(APQ8064AA) },
{ qcom_board_id(APQ8084) },
+ { qcom_board_id(MSM8130) },
+ { qcom_board_id(MSM8130AA) },
+ { qcom_board_id(MSM8130AB) },
+ { qcom_board_id(MSM8627AA) },
+ { qcom_board_id(MSM8227AA) },
{ qcom_board_id(APQ8074) },
{ qcom_board_id(MSM8274) },
{ qcom_board_id(MSM8674) },
+ { qcom_board_id(MDM9635) },
{ qcom_board_id_named(MSM8974PRO_AC, "MSM8974PRO-AC") },
{ qcom_board_id(MSM8126) },
{ qcom_board_id(APQ8026) },
{ qcom_board_id(MSM8926) },
+ { qcom_board_id(IPQ8062) },
+ { qcom_board_id(IPQ8064) },
+ { qcom_board_id(IPQ8066) },
+ { qcom_board_id(IPQ8068) },
{ qcom_board_id(MSM8326) },
{ qcom_board_id(MSM8916) },
- { qcom_board_id(MSM8956) },
- { qcom_board_id(MSM8976) },
{ qcom_board_id(MSM8994) },
{ qcom_board_id_named(APQ8074PRO_AA, "APQ8074PRO-AA") },
{ qcom_board_id_named(APQ8074PRO_AB, "APQ8074PRO-AB") },
@@ -273,32 +311,74 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(MSM8510) },
{ qcom_board_id(MSM8512) },
{ qcom_board_id(MSM8936) },
+ { qcom_board_id(MDM9640) },
{ qcom_board_id(MSM8939) },
{ qcom_board_id(APQ8036) },
{ qcom_board_id(APQ8039) },
+ { qcom_board_id(MSM8236) },
+ { qcom_board_id(MSM8636) },
+ { qcom_board_id(MSM8909) },
{ qcom_board_id(MSM8996) },
{ qcom_board_id(APQ8016) },
{ qcom_board_id(MSM8216) },
{ qcom_board_id(MSM8116) },
{ qcom_board_id(MSM8616) },
{ qcom_board_id(MSM8992) },
+ { qcom_board_id(APQ8092) },
{ qcom_board_id(APQ8094) },
+ { qcom_board_id(MSM8209) },
+ { qcom_board_id(MSM8208) },
+ { qcom_board_id(MDM9209) },
+ { qcom_board_id(MDM9309) },
+ { qcom_board_id(MDM9609) },
+ { qcom_board_id(MSM8239) },
+ { qcom_board_id(MSM8952) },
+ { qcom_board_id(APQ8009) },
+ { qcom_board_id(MSM8956) },
+ { qcom_board_id(MSM8929) },
+ { qcom_board_id(MSM8629) },
+ { qcom_board_id(MSM8229) },
+ { qcom_board_id(APQ8029) },
+ { qcom_board_id(APQ8056) },
+ { qcom_board_id(MSM8609) },
+ { qcom_board_id(APQ8076) },
+ { qcom_board_id(MSM8976) },
+ { qcom_board_id(IPQ8065) },
+ { qcom_board_id(IPQ8069) },
+ { qcom_board_id(MDM9650) },
+ { qcom_board_id(MDM9655) },
+ { qcom_board_id(MDM9250) },
+ { qcom_board_id(MDM9255) },
+ { qcom_board_id(MDM9350) },
+ { qcom_board_id(APQ8052) },
{ qcom_board_id(MDM9607) },
{ qcom_board_id(APQ8096) },
{ qcom_board_id(MSM8998) },
{ qcom_board_id(MSM8953) },
+ { qcom_board_id(MSM8937) },
+ { qcom_board_id(APQ8037) },
{ qcom_board_id(MDM8207) },
{ qcom_board_id(MDM9207) },
{ qcom_board_id(MDM9307) },
{ qcom_board_id(MDM9628) },
+ { qcom_board_id(MSM8909W) },
+ { qcom_board_id(APQ8009W) },
+ { qcom_board_id(MSM8996L) },
+ { qcom_board_id(MSM8917) },
{ qcom_board_id(APQ8053) },
{ qcom_board_id(MSM8996SG) },
+ { qcom_board_id(APQ8017) },
+ { qcom_board_id(MSM8217) },
+ { qcom_board_id(MSM8617) },
{ qcom_board_id(MSM8996AU) },
{ qcom_board_id(APQ8096AU) },
{ qcom_board_id(APQ8096SG) },
+ { qcom_board_id(MSM8940) },
+ { qcom_board_id(SDX201) },
{ qcom_board_id(SDM660) },
{ qcom_board_id(SDM630) },
{ qcom_board_id(APQ8098) },
+ { qcom_board_id(MSM8920) },
{ qcom_board_id(SDM845) },
{ qcom_board_id(MDM9206) },
{ qcom_board_id(IPQ8074) },
@@ -306,6 +386,8 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SDM658) },
{ qcom_board_id(SDA658) },
{ qcom_board_id(SDA630) },
+ { qcom_board_id(MSM8905) },
+ { qcom_board_id(SDX202) },
{ qcom_board_id(SDM450) },
{ qcom_board_id(SM8150) },
{ qcom_board_id(SDA845) },
@@ -317,10 +399,15 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SDM632) },
{ qcom_board_id(SDA632) },
{ qcom_board_id(SDA450) },
+ { qcom_board_id(SDM439) },
+ { qcom_board_id(SDM429) },
{ qcom_board_id(SM8250) },
{ qcom_board_id(SA8155) },
+ { qcom_board_id(SDA439) },
+ { qcom_board_id(SDA429) },
{ qcom_board_id(IPQ8070) },
{ qcom_board_id(IPQ8071) },
+ { qcom_board_id(QM215) },
{ qcom_board_id(IPQ8072A) },
{ qcom_board_id(IPQ8074A) },
{ qcom_board_id(IPQ8076A) },
@@ -330,18 +417,20 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ8071A) },
{ qcom_board_id(IPQ6018) },
{ qcom_board_id(IPQ6028) },
+ { qcom_board_id(SDM429W) },
{ qcom_board_id(SM4250) },
{ qcom_board_id(IPQ6000) },
{ qcom_board_id(IPQ6010) },
{ qcom_board_id(SC7180) },
{ qcom_board_id(SM6350) },
+ { qcom_board_id(QCM2150) },
+ { qcom_board_id(SDA429W) },
{ qcom_board_id(SM8350) },
{ qcom_board_id(SM6115) },
{ qcom_board_id(SC8280XP) },
{ qcom_board_id(IPQ6005) },
{ qcom_board_id(QRB5165) },
{ qcom_board_id(SM8450) },
- { qcom_board_id(SM8550) },
{ qcom_board_id(SM7225) },
{ qcom_board_id(SA8295P) },
{ qcom_board_id(SA8540P) },
@@ -352,12 +441,15 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SC7280) },
{ qcom_board_id(SC7180P) },
{ qcom_board_id(SM6375) },
+ { qcom_board_id(SM8550) },
{ qcom_board_id(QRU1000) },
{ qcom_board_id(QDU1000) },
{ qcom_board_id(QDU1010) },
{ qcom_board_id(QRU1032) },
{ qcom_board_id(QRU1052) },
{ qcom_board_id(QRU1062) },
+ { qcom_board_id(IPQ5332) },
+ { qcom_board_id(IPQ5322) },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
@@ -512,6 +604,20 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
&qcom_socinfo->info.fmt);
switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 17):
+ qcom_socinfo->info.oem_variant = __le32_to_cpu(info->oem_variant);
+ debugfs_create_u32("oem_variant", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.oem_variant);
+ fallthrough;
+ case SOCINFO_VERSION(0, 16):
+ qcom_socinfo->info.feature_code = __le32_to_cpu(info->feature_code);
+ qcom_socinfo->info.pcode = __le32_to_cpu(info->pcode);
+
+ debugfs_create_u32("feature_code", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.feature_code);
+ debugfs_create_u32("pcode", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.pcode);
+ fallthrough;
case SOCINFO_VERSION(0, 15):
qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 660498252ec5..4e8b51ba2266 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -330,6 +330,7 @@ config ARCH_R9A09G011
bool "ARM64 Platform support for RZ/V2M"
select PM
select PM_GENERIC_DOMAINS
+ select PWC_RZV2M
help
This enables support for the Renesas RZ/V2M SoC.
@@ -345,6 +346,9 @@ config ARCH_R9A07G043
endif # RISCV
+config PWC_RZV2M
+ bool "Renesas RZ/V2M PWC support" if COMPILE_TEST
+
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 535868c9c7e4..6e4e77b0afff 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
# Family
+obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o
obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o
diff --git a/drivers/soc/renesas/pwc-rzv2m.c b/drivers/soc/renesas/pwc-rzv2m.c
new file mode 100644
index 000000000000..c83bdbdabb64
--- /dev/null
+++ b/drivers/soc/renesas/pwc-rzv2m.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#define PWC_PWCRST 0x00
+#define PWC_PWCCKEN 0x04
+#define PWC_PWCCTL 0x50
+#define PWC_GPIO 0x80
+
+#define PWC_PWCRST_RSTSOFTAX 0x1
+#define PWC_PWCCKEN_ENGCKMAIN 0x1
+#define PWC_PWCCTL_PWOFF 0x1
+
+struct rzv2m_pwc_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct gpio_chip gp;
+ DECLARE_BITMAP(ch_en_bits, 2);
+};
+
+static void rzv2m_pwc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzv2m_pwc_priv *priv = gpiochip_get_data(chip);
+ u32 reg;
+
+ /* BIT 16 enables write to BIT 0, and BIT 17 enables write to BIT 1 */
+ reg = BIT(offset + 16);
+ if (value)
+ reg |= BIT(offset);
+
+ writel(reg, priv->base + PWC_GPIO);
+
+ assign_bit(offset, priv->ch_en_bits, value);
+}
+
+static int rzv2m_pwc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pwc_priv *priv = gpiochip_get_data(chip);
+
+ return test_bit(offset, priv->ch_en_bits);
+}
+
+static int rzv2m_pwc_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int nr, int value)
+{
+ if (nr > 1)
+ return -EINVAL;
+
+ rzv2m_pwc_gpio_set(gc, nr, value);
+
+ return 0;
+}
+
+static const struct gpio_chip rzv2m_pwc_gc = {
+ .label = "gpio_rzv2m_pwc",
+ .owner = THIS_MODULE,
+ .get = rzv2m_pwc_gpio_get,
+ .set = rzv2m_pwc_gpio_set,
+ .direction_output = rzv2m_pwc_gpio_direction_output,
+ .can_sleep = false,
+ .ngpio = 2,
+ .base = -1,
+};
+
+static int rzv2m_pwc_poweroff(struct sys_off_data *data)
+{
+ struct rzv2m_pwc_priv *priv = data->cb_data;
+
+ writel(PWC_PWCRST_RSTSOFTAX, priv->base + PWC_PWCRST);
+ writel(PWC_PWCCKEN_ENGCKMAIN, priv->base + PWC_PWCCKEN);
+ writel(PWC_PWCCTL_PWOFF, priv->base + PWC_PWCCTL);
+
+ mdelay(150);
+
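+ /* Still running after 150 ms, so the power off request did not take effect */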
+ dev_err(priv->dev, "Failed to power off the system");
+
+ return NOTIFY_DONE;
+}
+
+static int rzv2m_pwc_probe(struct platform_device *pdev)
+{
+ struct rzv2m_pwc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /*
+ * The register used by this driver cannot be read, therefore set the
+ * outputs to their default values and initialize priv->ch_en_bits
+ * accordingly. BIT 16 enables write to BIT 0, BIT 17 enables write to
+ * BIT 1, and the default value of both BIT 0 and BIT 1 is 0.
+ */
+ writel(BIT(17) | BIT(16), priv->base + PWC_GPIO);
+ bitmap_zero(priv->ch_en_bits, 2);
+
+ priv->gp = rzv2m_pwc_gc;
+ priv->gp.parent = pdev->dev.parent;
+ priv->gp.fwnode = dev_fwnode(&pdev->dev);
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->gp, priv);
+ if (ret)
+ return ret;
+
+ if (device_property_read_bool(&pdev->dev, "renesas,rzv2m-pwc-power"))
+ ret = devm_register_power_off_handler(&pdev->dev,
+ rzv2m_pwc_poweroff, priv);
+
+ return ret;
+}
+
+static const struct of_device_id rzv2m_pwc_of_match[] = {
+ { .compatible = "renesas,rzv2m-pwc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2m_pwc_of_match);
+
+static struct platform_driver rzv2m_pwc_driver = {
+ .probe = rzv2m_pwc_probe,
+ .driver = {
+ .name = "rzv2m_pwc",
+ .of_match_table = of_match_ptr(rzv2m_pwc_of_match),
+ },
+};
+module_platform_driver(rzv2m_pwc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fabrizio Castro <castro.fabrizio.jz@renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2M PWC driver");
diff --git a/drivers/soc/renesas/r8a779g0-sysc.c b/drivers/soc/renesas/r8a779g0-sysc.c
index a452709f066d..b932eba1b804 100644
--- a/drivers/soc/renesas/r8a779g0-sysc.c
+++ b/drivers/soc/renesas/r8a779g0-sysc.c
@@ -37,6 +37,7 @@ static struct rcar_gen4_sysc_area r8a779g0_areas[] __initdata = {
{ "a3vip0", R8A779G0_PD_A3VIP0, R8A779G0_PD_ALWAYS_ON },
{ "a3vip1", R8A779G0_PD_A3VIP1, R8A779G0_PD_ALWAYS_ON },
{ "a3vip2", R8A779G0_PD_A3VIP2, R8A779G0_PD_ALWAYS_ON },
+ { "a3dul", R8A779G0_PD_A3DUL, R8A779G0_PD_ALWAYS_ON },
{ "a3isp0", R8A779G0_PD_A3ISP0, R8A779G0_PD_ALWAYS_ON },
{ "a3isp1", R8A779G0_PD_A3ISP1, R8A779G0_PD_ALWAYS_ON },
{ "a3ir", R8A779G0_PD_A3IR, R8A779G0_PD_ALWAYS_ON },
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
index ed4c571f8771..e86870be34c9 100644
--- a/drivers/soc/sifive/Kconfig
+++ b/drivers/soc/sifive/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-if SOC_SIFIVE
+if SOC_SIFIVE || SOC_STARFIVE
config SIFIVE_CCACHE
bool "Sifive Composable Cache controller"
diff --git a/drivers/soc/starfive/Kconfig b/drivers/soc/starfive/Kconfig
new file mode 100644
index 000000000000..bdb96dc4c989
--- /dev/null
+++ b/drivers/soc/starfive/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config JH71XX_PMU
+ bool "Support PMU for StarFive JH71XX Soc"
+ depends on PM
+ depends on SOC_STARFIVE || COMPILE_TEST
+ default SOC_STARFIVE
+ select PM_GENERIC_DOMAINS
+ help
+ Say 'y' here to enable power domain support.
+ In order to meet low power requirements, a Power Management Unit (PMU)
+ is designed for controlling power resources in StarFive JH71XX SoCs.
diff --git a/drivers/soc/starfive/Makefile b/drivers/soc/starfive/Makefile
new file mode 100644
index 000000000000..13b589d6b5f3
--- /dev/null
+++ b/drivers/soc/starfive/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_JH71XX_PMU) += jh71xx_pmu.o
diff --git a/drivers/soc/starfive/jh71xx_pmu.c b/drivers/soc/starfive/jh71xx_pmu.c
new file mode 100644
index 000000000000..7d5f50d71c0d
--- /dev/null
+++ b/drivers/soc/starfive/jh71xx_pmu.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * StarFive JH71XX PMU (Power Management Unit) Controller Driver
+ *
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <dt-bindings/power/starfive,jh7110-pmu.h>
+
+/* register offset */
+#define JH71XX_PMU_SW_TURN_ON_POWER 0x0C
+#define JH71XX_PMU_SW_TURN_OFF_POWER 0x10
+#define JH71XX_PMU_SW_ENCOURAGE 0x44
+#define JH71XX_PMU_TIMER_INT_MASK 0x48
+#define JH71XX_PMU_CURR_POWER_MODE 0x80
+#define JH71XX_PMU_EVENT_STATUS 0x88
+#define JH71XX_PMU_INT_STATUS 0x8C
+
+/* sw encourage cfg */
+#define JH71XX_PMU_SW_ENCOURAGE_EN_LO 0x05
+#define JH71XX_PMU_SW_ENCOURAGE_EN_HI 0x50
+#define JH71XX_PMU_SW_ENCOURAGE_DIS_LO 0x0A
+#define JH71XX_PMU_SW_ENCOURAGE_DIS_HI 0xA0
+#define JH71XX_PMU_SW_ENCOURAGE_ON 0xFF
+
+/* pmu int status */
+#define JH71XX_PMU_INT_SEQ_DONE BIT(0)
+#define JH71XX_PMU_INT_HW_REQ BIT(1)
+#define JH71XX_PMU_INT_SW_FAIL GENMASK(3, 2)
+#define JH71XX_PMU_INT_HW_FAIL GENMASK(5, 4)
+#define JH71XX_PMU_INT_PCH_FAIL GENMASK(8, 6)
+#define JH71XX_PMU_INT_ALL_MASK GENMASK(8, 0)
+
+/*
+ * The time required for switching power status is based on the time
+ * to turn on the largest domain's power, which is at the microsecond level.
+ */
+#define JH71XX_PMU_TIMEOUT_US 100
+
+struct jh71xx_domain_info {
+ const char * const name;
+ unsigned int flags;
+ u8 bit;
+};
+
+struct jh71xx_pmu_match_data {
+ const struct jh71xx_domain_info *domain_info;
+ int num_domains;
+};
+
+struct jh71xx_pmu {
+ struct device *dev;
+ const struct jh71xx_pmu_match_data *match_data;
+ void __iomem *base;
+ struct generic_pm_domain **genpd;
+ struct genpd_onecell_data genpd_data;
+ int irq;
+ spinlock_t lock; /* protects pmu reg */
+};
+
+struct jh71xx_pmu_dev {
+ const struct jh71xx_domain_info *domain_info;
+ struct jh71xx_pmu *pmu;
+ struct generic_pm_domain genpd;
+};
+
+static int jh71xx_pmu_get_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool *is_on)
+{
+ struct jh71xx_pmu *pmu = pmd->pmu;
+
+ if (!mask)
+ return -EINVAL;
+
+ *is_on = readl(pmu->base + JH71XX_PMU_CURR_POWER_MODE) & mask;
+
+ return 0;
+}
+
+static int jh71xx_pmu_set_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool on)
+{
+ struct jh71xx_pmu *pmu = pmd->pmu;
+ unsigned long flags;
+ u32 val;
+ u32 mode;
+ u32 encourage_lo;
+ u32 encourage_hi;
+ bool is_on;
+ int ret;
+
+ ret = jh71xx_pmu_get_state(pmd, mask, &is_on);
+ if (ret) {
+ dev_dbg(pmu->dev, "unable to get current state for %s\n",
+ pmd->genpd.name);
+ return ret;
+ }
+
+ if (is_on == on) {
+ dev_dbg(pmu->dev, "pm domain [%s] is already %sable status.\n",
+ pmd->genpd.name, on ? "en" : "dis");
+ return 0;
+ }
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /*
+ * The PMU accepts software encourage to switch power mode in the following 2 steps:
+ *
+ * 1. Configure the register SW_TURN_ON_POWER (offset 0x0c) by writing 1 to
+ * the bit corresponding to the power domain that will be turned on
+ * and writing 0 to the others.
+ * Likewise, configure the register SW_TURN_OFF_POWER (offset 0x10) by
+ * writing 1 to the bit corresponding to the power domain that will be
+ * turned off and writing 0 to the others.
+ */
+ if (on) {
+ mode = JH71XX_PMU_SW_TURN_ON_POWER;
+ encourage_lo = JH71XX_PMU_SW_ENCOURAGE_EN_LO;
+ encourage_hi = JH71XX_PMU_SW_ENCOURAGE_EN_HI;
+ } else {
+ mode = JH71XX_PMU_SW_TURN_OFF_POWER;
+ encourage_lo = JH71XX_PMU_SW_ENCOURAGE_DIS_LO;
+ encourage_hi = JH71XX_PMU_SW_ENCOURAGE_DIS_HI;
+ }
+
+ writel(mask, pmu->base + mode);
+
+ /*
+ * 2. Write SW encourage command sequence to the Software Encourage Reg (offset 0x44)
+ * First write SW_MODE_ENCOURAGE_ON to JH71XX_PMU_SW_ENCOURAGE. This will reset
+ * the state machine which parses the command sequence. This register must be
+ * written every time software wants to power on/off a domain.
+ * Then write the lower bits of the command sequence, followed by the upper
+ * bits. The sequence differs between powering on & off a domain.
+ */
+ writel(JH71XX_PMU_SW_ENCOURAGE_ON, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
+ writel(encourage_lo, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
+ writel(encourage_hi, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ /* Wait for the power domain bit to be enabled / disabled */
+ if (on) {
+ ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE,
+ val, val & mask,
+ 1, JH71XX_PMU_TIMEOUT_US);
+ } else {
+ ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE,
+ val, !(val & mask),
+ 1, JH71XX_PMU_TIMEOUT_US);
+ }
+
+ if (ret) {
+ dev_err(pmu->dev, "%s: failed to power %s\n",
+ pmd->genpd.name, on ? "on" : "off");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int jh71xx_pmu_on(struct generic_pm_domain *genpd)
+{
+ struct jh71xx_pmu_dev *pmd = container_of(genpd,
+ struct jh71xx_pmu_dev, genpd);
+ u32 pwr_mask = BIT(pmd->domain_info->bit);
+
+ return jh71xx_pmu_set_state(pmd, pwr_mask, true);
+}
+
+static int jh71xx_pmu_off(struct generic_pm_domain *genpd)
+{
+ struct jh71xx_pmu_dev *pmd = container_of(genpd,
+ struct jh71xx_pmu_dev, genpd);
+ u32 pwr_mask = BIT(pmd->domain_info->bit);
+
+ return jh71xx_pmu_set_state(pmd, pwr_mask, false);
+}
+
+static void jh71xx_pmu_int_enable(struct jh71xx_pmu *pmu, u32 mask, bool enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val = readl(pmu->base + JH71XX_PMU_TIMER_INT_MASK);
+
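+ /* Clearing a bit in TIMER_INT_MASK unmasks (enables) that interrupt */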
+ if (enable)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ writel(val, pmu->base + JH71XX_PMU_TIMER_INT_MASK);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+}
+
+static irqreturn_t jh71xx_pmu_interrupt(int irq, void *data)
+{
+ struct jh71xx_pmu *pmu = data;
+ u32 val;
+
+ val = readl(pmu->base + JH71XX_PMU_INT_STATUS);
+
+ if (val & JH71XX_PMU_INT_SEQ_DONE)
+ dev_dbg(pmu->dev, "sequence done.\n");
+ if (val & JH71XX_PMU_INT_HW_REQ)
+ dev_dbg(pmu->dev, "hardware encourage requestion.\n");
+ if (val & JH71XX_PMU_INT_SW_FAIL)
+ dev_err(pmu->dev, "software encourage fail.\n");
+ if (val & JH71XX_PMU_INT_HW_FAIL)
+ dev_err(pmu->dev, "hardware encourage fail.\n");
+ if (val & JH71XX_PMU_INT_PCH_FAIL)
+ dev_err(pmu->dev, "p-channel fail event.\n");
+
+ /* clear interrupts */
+ writel(val, pmu->base + JH71XX_PMU_INT_STATUS);
+ writel(val, pmu->base + JH71XX_PMU_EVENT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int jh71xx_pmu_init_domain(struct jh71xx_pmu *pmu, int index)
+{
+ struct jh71xx_pmu_dev *pmd;
+ u32 pwr_mask;
+ int ret;
+ bool is_on = false;
+
+ pmd = devm_kzalloc(pmu->dev, sizeof(*pmd), GFP_KERNEL);
+ if (!pmd)
+ return -ENOMEM;
+
+ pmd->domain_info = &pmu->match_data->domain_info[index];
+ pmd->pmu = pmu;
+ pwr_mask = BIT(pmd->domain_info->bit);
+
+ pmd->genpd.name = pmd->domain_info->name;
+ pmd->genpd.flags = pmd->domain_info->flags;
+
+ ret = jh71xx_pmu_get_state(pmd, pwr_mask, &is_on);
+ if (ret)
+ dev_warn(pmu->dev, "unable to get current state for %s\n",
+ pmd->genpd.name);
+
+ pmd->genpd.power_on = jh71xx_pmu_on;
+ pmd->genpd.power_off = jh71xx_pmu_off;
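+ /* Register with genpd; start powered off unless the PMU reports the domain as on */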
+ pm_genpd_init(&pmd->genpd, NULL, !is_on);
+
+ pmu->genpd_data.domains[index] = &pmd->genpd;
+
+ return 0;
+}
+
+static int jh71xx_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct jh71xx_pmu_match_data *match_data;
+ struct jh71xx_pmu *pmu;
+ unsigned int i;
+ int ret;
+
+ pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pmu->base))
+ return PTR_ERR(pmu->base);
+
+ pmu->irq = platform_get_irq(pdev, 0);
+ if (pmu->irq < 0)
+ return pmu->irq;
+
+ ret = devm_request_irq(dev, pmu->irq, jh71xx_pmu_interrupt,
+ 0, pdev->name, pmu);
+ if (ret)
+ dev_err(dev, "failed to request irq\n");
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return -EINVAL;
+
+ pmu->genpd = devm_kcalloc(dev, match_data->num_domains,
+ sizeof(struct generic_pm_domain *),
+ GFP_KERNEL);
+ if (!pmu->genpd)
+ return -ENOMEM;
+
+ pmu->dev = dev;
+ pmu->match_data = match_data;
+ pmu->genpd_data.domains = pmu->genpd;
+ pmu->genpd_data.num_domains = match_data->num_domains;
+
+ for (i = 0; i < match_data->num_domains; i++) {
+ ret = jh71xx_pmu_init_domain(pmu, i);
+ if (ret) {
+ dev_err(dev, "failed to initialize power domain\n");
+ return ret;
+ }
+ }
+
+ spin_lock_init(&pmu->lock);
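+ /* Enable all PMU interrupts except the p-channel failure events */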
+ jh71xx_pmu_int_enable(pmu, JH71XX_PMU_INT_ALL_MASK & ~JH71XX_PMU_INT_PCH_FAIL, true);
+
+ ret = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ if (ret) {
+ dev_err(dev, "failed to register genpd driver: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "registered %u power domains\n", i);
+
+ return 0;
+}
+
+static const struct jh71xx_domain_info jh7110_power_domains[] = {
+ [JH7110_PD_SYSTOP] = {
+ .name = "SYSTOP",
+ .bit = 0,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ },
+ [JH7110_PD_CPU] = {
+ .name = "CPU",
+ .bit = 1,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ },
+ [JH7110_PD_GPUA] = {
+ .name = "GPUA",
+ .bit = 2,
+ },
+ [JH7110_PD_VDEC] = {
+ .name = "VDEC",
+ .bit = 3,
+ },
+ [JH7110_PD_VOUT] = {
+ .name = "VOUT",
+ .bit = 4,
+ },
+ [JH7110_PD_ISP] = {
+ .name = "ISP",
+ .bit = 5,
+ },
+ [JH7110_PD_VENC] = {
+ .name = "VENC",
+ .bit = 6,
+ },
+};
+
+static const struct jh71xx_pmu_match_data jh7110_pmu = {
+ .num_domains = ARRAY_SIZE(jh7110_power_domains),
+ .domain_info = jh7110_power_domains,
+};
+
+static const struct of_device_id jh71xx_pmu_of_match[] = {
+ {
+ .compatible = "starfive,jh7110-pmu",
+ .data = (void *)&jh7110_pmu,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver jh71xx_pmu_driver = {
+ .probe = jh71xx_pmu_probe,
+ .driver = {
+ .name = "jh71xx-pmu",
+ .of_match_table = jh71xx_pmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(jh71xx_pmu_driver);
+
+MODULE_AUTHOR("Walker Chen <walker.chen@starfivetech.com>");
+MODULE_DESCRIPTION("StarFive JH71XX PMU Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index 8aecbc9b1976..c5070914fc6a 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -19,3 +19,12 @@ config SUNXI_SRAM
Say y here to enable the SRAM controller support. This
device is responsible for mapping the SRAM in the sunXi SoCs
either to the CPU/DMA, or to the devices.
+
+config SUN20I_PPU
+ bool "Allwinner D1 PPU power domain driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y to enable the PPU power domain driver. This saves power
+ when certain peripherals, such as the video engine, are idle.
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile
index 549159571d4f..90ff2ebe7655 100644
--- a/drivers/soc/sunxi/Makefile
+++ b/drivers/soc/sunxi/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_SUNXI_MBUS) += sunxi_mbus.o
obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o
+obj-$(CONFIG_SUN20I_PPU) += sun20i-ppu.o
diff --git a/drivers/soc/sunxi/sun20i-ppu.c b/drivers/soc/sunxi/sun20i-ppu.c
new file mode 100644
index 000000000000..98cb41d36560
--- /dev/null
+++ b/drivers/soc/sunxi/sun20i-ppu.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+
+#define PD_STATE_ON 1
+#define PD_STATE_OFF 2
+
+#define PD_RSTN_REG 0x00
+#define PD_CLK_GATE_REG 0x04
+#define PD_PWROFF_GATE_REG 0x08
+#define PD_PSW_ON_REG 0x0c
+#define PD_PSW_OFF_REG 0x10
+#define PD_PSW_DELAY_REG 0x14
+#define PD_OFF_DELAY_REG 0x18
+#define PD_ON_DELAY_REG 0x1c
+#define PD_COMMAND_REG 0x20
+#define PD_STATUS_REG 0x24
+#define PD_STATUS_COMPLETE BIT(1)
+#define PD_STATUS_BUSY BIT(3)
+#define PD_STATUS_STATE GENMASK(17, 16)
+#define PD_ACTIVE_CTRL_REG 0x2c
+#define PD_GATE_STATUS_REG 0x30
+#define PD_RSTN_STATUS BIT(0)
+#define PD_CLK_GATE_STATUS BIT(1)
+#define PD_PWROFF_GATE_STATUS BIT(2)
+#define PD_PSW_STATUS_REG 0x34
+
+#define PD_REGS_SIZE 0x80
+
+struct sun20i_ppu_desc {
+ const char *const *names;
+ unsigned int num_domains;
+};
+
+struct sun20i_ppu_pd {
+ struct generic_pm_domain genpd;
+ void __iomem *base;
+};
+
+#define to_sun20i_ppu_pd(_genpd) \
+ container_of(_genpd, struct sun20i_ppu_pd, genpd)
+
+static bool sun20i_ppu_pd_is_on(const struct sun20i_ppu_pd *pd)
+{
+ u32 status = readl(pd->base + PD_STATUS_REG);
+
+ return FIELD_GET(PD_STATUS_STATE, status) == PD_STATE_ON;
+}
+
+static int sun20i_ppu_pd_set_power(const struct sun20i_ppu_pd *pd, bool power_on)
+{
+ u32 state, status;
+ int ret;
+
+ if (sun20i_ppu_pd_is_on(pd) == power_on)
+ return 0;
+
+ /* Wait for the power controller to be idle. */
+ ret = readl_poll_timeout(pd->base + PD_STATUS_REG, status,
+ !(status & PD_STATUS_BUSY), 100, 1000);
+ if (ret)
+ return ret;
+
+ state = power_on ? PD_STATE_ON : PD_STATE_OFF;
+ writel(state, pd->base + PD_COMMAND_REG);
+
+ /* Wait for the state transition to complete. */
+ ret = readl_poll_timeout(pd->base + PD_STATUS_REG, status,
+ FIELD_GET(PD_STATUS_STATE, status) == state &&
+ (status & PD_STATUS_COMPLETE), 100, 1000);
+ if (ret)
+ return ret;
+
+ /* Clear the completion flag. */
+ writel(status, pd->base + PD_STATUS_REG);
+
+ return 0;
+}
+
+static int sun20i_ppu_pd_power_on(struct generic_pm_domain *genpd)
+{
+ const struct sun20i_ppu_pd *pd = to_sun20i_ppu_pd(genpd);
+
+ return sun20i_ppu_pd_set_power(pd, true);
+}
+
+static int sun20i_ppu_pd_power_off(struct generic_pm_domain *genpd)
+{
+ const struct sun20i_ppu_pd *pd = to_sun20i_ppu_pd(genpd);
+
+ return sun20i_ppu_pd_set_power(pd, false);
+}
+
+static int sun20i_ppu_probe(struct platform_device *pdev)
+{
+ const struct sun20i_ppu_desc *desc;
+ struct device *dev = &pdev->dev;
+ struct genpd_onecell_data *ppu;
+ struct sun20i_ppu_pd *pds;
+ struct reset_control *rst;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ pds = devm_kcalloc(dev, desc->num_domains, sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ ppu = devm_kzalloc(dev, sizeof(*ppu), GFP_KERNEL);
+ if (!ppu)
+ return -ENOMEM;
+
+ ppu->domains = devm_kcalloc(dev, desc->num_domains,
+ sizeof(*ppu->domains), GFP_KERNEL);
+ if (!ppu->domains)
+ return -ENOMEM;
+
+ ppu->num_domains = desc->num_domains;
+ platform_set_drvdata(pdev, ppu);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
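+ /* Take the PPU out of reset before registering its power domains */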
+ ret = reset_control_deassert(rst);
+ if (ret)
+ return ret;
+
+ for (unsigned int i = 0; i < ppu->num_domains; ++i) {
+ struct sun20i_ppu_pd *pd = &pds[i];
+
+ pd->genpd.name = desc->names[i];
+ pd->genpd.power_off = sun20i_ppu_pd_power_off;
+ pd->genpd.power_on = sun20i_ppu_pd_power_on;
+ pd->base = base + PD_REGS_SIZE * i;
+
+ ret = pm_genpd_init(&pd->genpd, NULL, sun20i_ppu_pd_is_on(pd));
+ if (ret) {
+ dev_warn(dev, "Failed to add '%s' domain: %d\n",
+ pd->genpd.name, ret);
+ continue;
+ }
+
+ ppu->domains[i] = &pd->genpd;
+ }
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, ppu);
+ if (ret)
+ dev_warn(dev, "Failed to add provider: %d\n", ret);
+
+ return 0;
+}
+
+static const char *const sun20i_d1_ppu_pd_names[] = {
+ "CPU",
+ "VE",
+ "DSP",
+};
+
+static const struct sun20i_ppu_desc sun20i_d1_ppu_desc = {
+ .names = sun20i_d1_ppu_pd_names,
+ .num_domains = ARRAY_SIZE(sun20i_d1_ppu_pd_names),
+};
+
+static const struct of_device_id sun20i_ppu_of_match[] = {
+ {
+ .compatible = "allwinner,sun20i-d1-ppu",
+ .data = &sun20i_d1_ppu_desc,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun20i_ppu_of_match);
+
+static struct platform_driver sun20i_ppu_driver = {
+ .probe = sun20i_ppu_probe,
+ .driver = {
+ .name = "sun20i-ppu",
+ .of_match_table = sun20i_ppu_of_match,
+ /* Power domains cannot be removed while they are in use. */
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(sun20i_ppu_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner D1 PPU power domain driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 92f9186c1c42..f09918c59042 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -120,6 +120,9 @@ static int sunxi_sram_show(struct seq_file *s, void *data)
seq_puts(s, "--------------------\n\n");
for_each_child_of_node(sram_dev->of_node, sram_node) {
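+ /* Skip child nodes that do not describe an SRAM section */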
+ if (!of_device_is_compatible(sram_node, "mmio-sram"))
+ continue;
+
sram_addr_p = of_get_address(sram_node, 0, NULL, NULL);
seq_printf(s, "sram@%08x\n",
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index 2de082765bef..c76381899ef4 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -116,8 +116,10 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
INIT_LIST_HEAD(&eve_data->cb_list_head);
cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
- if (!cb_data)
+ if (!cb_data) {
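+ /* Free the event data allocated above to avoid leaking it */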
+ kfree(eve_data);
return -ENOMEM;
+ }
cb_data->eve_cb = cb_fun;
cb_data->agent_data = data;
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index fcce2433bd6d..69d03ad4cf1e 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -227,7 +227,7 @@ static struct generic_pm_domain *zynqmp_gpd_xlate
goto done;
}
- /**
+ /*
* Add index in empty node_id of power domain list as no existing
* power domain found for current index.
*/
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index 24ec1c83f379..ec0904faf3a1 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -58,7 +58,7 @@ static void rpcif_spi_mem_prepare(struct spi_device *spi_dev,
rpc_op.data.dir = RPCIF_NO_DATA;
}
- rpcif_prepare(rpc, &rpc_op, offs, len);
+ rpcif_prepare(rpc->dev, &rpc_op, offs, len);
}
static bool rpcif_spi_mem_supports_op(struct spi_mem *mem,
@@ -86,7 +86,7 @@ static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len);
- return rpcif_dirmap_read(rpc, offs, len, buf);
+ return rpcif_dirmap_read(rpc->dev, offs, len, buf);
}
static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
@@ -117,7 +117,7 @@ static int rpcif_spi_mem_exec_op(struct spi_mem *mem,
rpcif_spi_mem_prepare(mem->spi, op, NULL, NULL);
- return rpcif_manual_xfer(rpc);
+ return rpcif_manual_xfer(rpc->dev);
}
static const struct spi_controller_mem_ops rpcif_spi_mem_ops = {
@@ -147,7 +147,7 @@ static int rpcif_spi_probe(struct platform_device *pdev)
ctlr->dev.of_node = parent->of_node;
- rpcif_enable_rpm(rpc);
+ pm_runtime_enable(rpc->dev);
ctlr->num_chipselect = 1;
ctlr->mem_ops = &rpcif_spi_mem_ops;
@@ -156,7 +156,7 @@ static int rpcif_spi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
- error = rpcif_hw_init(rpc, false);
+ error = rpcif_hw_init(rpc->dev, false);
if (error)
goto out_disable_rpm;
@@ -169,7 +169,7 @@ static int rpcif_spi_probe(struct platform_device *pdev)
return 0;
out_disable_rpm:
- rpcif_disable_rpm(rpc);
+ pm_runtime_disable(rpc->dev);
return error;
}
@@ -179,7 +179,7 @@ static int rpcif_spi_remove(struct platform_device *pdev)
struct rpcif *rpc = spi_controller_get_devdata(ctlr);
spi_unregister_controller(ctlr);
- rpcif_disable_rpm(rpc);
+ pm_runtime_disable(rpc->dev);
return 0;
}
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index 4122a51e9874..f6edb12ec004 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -10,7 +10,7 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#define LMH_NODE_DCVS 0x44435653
#define LMH_CLUSTER0_NODE_ID 0x6370302D
diff --git a/drivers/ufs/host/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c
index 62387ccd5b30..453978877ae9 100644
--- a/drivers/ufs/host/ufs-qcom-ice.c
+++ b/drivers/ufs/host/ufs-qcom-ice.c
@@ -8,7 +8,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#include "ufs-qcom.h"
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index b54f470e0d03..1f971c880dde 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -899,6 +899,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
return ret;
}
+EXPORT_SYMBOL_GPL(debugfs_create_str);
static ssize_t debugfs_write_file_str(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index f7aef3f310d7..aa95439708dc 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -11,40 +11,70 @@
* The MSM chipset and hardware revision used by Qualcomm bootloaders, DTS for
* older chipsets (qcom,msm-id) and in socinfo driver:
*/
+#define QCOM_ID_MSM8260 70
+#define QCOM_ID_MSM8660 71
+#define QCOM_ID_APQ8060 86
#define QCOM_ID_MSM8960 87
#define QCOM_ID_APQ8064 109
+#define QCOM_ID_MSM8930 116
+#define QCOM_ID_MSM8630 117
+#define QCOM_ID_MSM8230 118
+#define QCOM_ID_APQ8030 119
+#define QCOM_ID_MSM8627 120
+#define QCOM_ID_MSM8227 121
#define QCOM_ID_MSM8660A 122
#define QCOM_ID_MSM8260A 123
#define QCOM_ID_APQ8060A 124
#define QCOM_ID_MSM8974 126
+#define QCOM_ID_MSM8225 127
+#define QCOM_ID_MSM8625 129
#define QCOM_ID_MPQ8064 130
#define QCOM_ID_MSM8960AB 138
#define QCOM_ID_APQ8060AB 139
#define QCOM_ID_MSM8260AB 140
#define QCOM_ID_MSM8660AB 141
+#define QCOM_ID_MSM8930AA 142
+#define QCOM_ID_MSM8630AA 143
+#define QCOM_ID_MSM8230AA 144
#define QCOM_ID_MSM8626 145
#define QCOM_ID_MSM8610 147
#define QCOM_ID_APQ8064AB 153
+#define QCOM_ID_MSM8930AB 154
+#define QCOM_ID_MSM8630AB 155
+#define QCOM_ID_MSM8230AB 156
+#define QCOM_ID_APQ8030AB 157
#define QCOM_ID_MSM8226 158
#define QCOM_ID_MSM8526 159
+#define QCOM_ID_APQ8030AA 160
#define QCOM_ID_MSM8110 161
#define QCOM_ID_MSM8210 162
#define QCOM_ID_MSM8810 163
#define QCOM_ID_MSM8212 164
#define QCOM_ID_MSM8612 165
#define QCOM_ID_MSM8112 166
+#define QCOM_ID_MSM8125 167
#define QCOM_ID_MSM8225Q 168
#define QCOM_ID_MSM8625Q 169
#define QCOM_ID_MSM8125Q 170
#define QCOM_ID_APQ8064AA 172
#define QCOM_ID_APQ8084 178
+#define QCOM_ID_MSM8130 179
+#define QCOM_ID_MSM8130AA 180
+#define QCOM_ID_MSM8130AB 181
+#define QCOM_ID_MSM8627AA 182
+#define QCOM_ID_MSM8227AA 183
#define QCOM_ID_APQ8074 184
#define QCOM_ID_MSM8274 185
#define QCOM_ID_MSM8674 186
+#define QCOM_ID_MDM9635 187
#define QCOM_ID_MSM8974PRO_AC 194
#define QCOM_ID_MSM8126 198
#define QCOM_ID_APQ8026 199
#define QCOM_ID_MSM8926 200
+#define QCOM_ID_IPQ8062 201
+#define QCOM_ID_IPQ8064 202
+#define QCOM_ID_IPQ8066 203
+#define QCOM_ID_IPQ8068 204
#define QCOM_ID_MSM8326 205
#define QCOM_ID_MSM8916 206
#define QCOM_ID_MSM8994 207
@@ -68,34 +98,74 @@
#define QCOM_ID_MSM8510 225
#define QCOM_ID_MSM8512 226
#define QCOM_ID_MSM8936 233
+#define QCOM_ID_MDM9640 234
#define QCOM_ID_MSM8939 239
#define QCOM_ID_APQ8036 240
#define QCOM_ID_APQ8039 241
+#define QCOM_ID_MSM8236 242
+#define QCOM_ID_MSM8636 243
+#define QCOM_ID_MSM8909 245
#define QCOM_ID_MSM8996 246
#define QCOM_ID_APQ8016 247
#define QCOM_ID_MSM8216 248
#define QCOM_ID_MSM8116 249
#define QCOM_ID_MSM8616 250
#define QCOM_ID_MSM8992 251
+#define QCOM_ID_APQ8092 252
#define QCOM_ID_APQ8094 253
+#define QCOM_ID_MSM8209 258
+#define QCOM_ID_MSM8208 259
+#define QCOM_ID_MDM9209 260
+#define QCOM_ID_MDM9309 261
+#define QCOM_ID_MDM9609 262
+#define QCOM_ID_MSM8239 263
+#define QCOM_ID_MSM8952 264
+#define QCOM_ID_APQ8009 265
#define QCOM_ID_MSM8956 266
+#define QCOM_ID_MSM8929 268
+#define QCOM_ID_MSM8629 269
+#define QCOM_ID_MSM8229 270
+#define QCOM_ID_APQ8029 271
+#define QCOM_ID_APQ8056 274
+#define QCOM_ID_MSM8609 275
+#define QCOM_ID_APQ8076 277
#define QCOM_ID_MSM8976 278
+#define QCOM_ID_MDM9650 279
+#define QCOM_ID_IPQ8065 280
+#define QCOM_ID_IPQ8069 281
+#define QCOM_ID_MDM9655 283
+#define QCOM_ID_MDM9250 284
+#define QCOM_ID_MDM9255 285
+#define QCOM_ID_MDM9350 286
+#define QCOM_ID_APQ8052 289
#define QCOM_ID_MDM9607 290
#define QCOM_ID_APQ8096 291
#define QCOM_ID_MSM8998 292
#define QCOM_ID_MSM8953 293
+#define QCOM_ID_MSM8937 294
+#define QCOM_ID_APQ8037 295
#define QCOM_ID_MDM8207 296
#define QCOM_ID_MDM9207 297
#define QCOM_ID_MDM9307 298
#define QCOM_ID_MDM9628 299
+#define QCOM_ID_MSM8909W 300
+#define QCOM_ID_APQ8009W 301
+#define QCOM_ID_MSM8996L 302
+#define QCOM_ID_MSM8917 303
#define QCOM_ID_APQ8053 304
#define QCOM_ID_MSM8996SG 305
+#define QCOM_ID_APQ8017 307
+#define QCOM_ID_MSM8217 308
+#define QCOM_ID_MSM8617 309
#define QCOM_ID_MSM8996AU 310
#define QCOM_ID_APQ8096AU 311
#define QCOM_ID_APQ8096SG 312
+#define QCOM_ID_MSM8940 313
+#define QCOM_ID_SDX201 314
#define QCOM_ID_SDM660 317
#define QCOM_ID_SDM630 318
#define QCOM_ID_APQ8098 319
+#define QCOM_ID_MSM8920 320
#define QCOM_ID_SDM845 321
#define QCOM_ID_MDM9206 322
#define QCOM_ID_IPQ8074 323
@@ -103,6 +173,8 @@
#define QCOM_ID_SDM658 325
#define QCOM_ID_SDA658 326
#define QCOM_ID_SDA630 327
+#define QCOM_ID_MSM8905 331
+#define QCOM_ID_SDX202 333
#define QCOM_ID_SDM450 338
#define QCOM_ID_SM8150 339
#define QCOM_ID_SDA845 341
@@ -114,10 +186,15 @@
#define QCOM_ID_SDM632 349
#define QCOM_ID_SDA632 350
#define QCOM_ID_SDA450 351
+#define QCOM_ID_SDM439 353
+#define QCOM_ID_SDM429 354
#define QCOM_ID_SM8250 356
#define QCOM_ID_SA8155 362
+#define QCOM_ID_SDA439 363
+#define QCOM_ID_SDA429 364
#define QCOM_ID_IPQ8070 375
#define QCOM_ID_IPQ8071 376
+#define QCOM_ID_QM215 386
#define QCOM_ID_IPQ8072A 389
#define QCOM_ID_IPQ8074A 390
#define QCOM_ID_IPQ8076A 391
@@ -127,11 +204,14 @@
#define QCOM_ID_IPQ8071A 396
#define QCOM_ID_IPQ6018 402
#define QCOM_ID_IPQ6028 403
+#define QCOM_ID_SDM429W 416
#define QCOM_ID_SM4250 417
#define QCOM_ID_IPQ6000 421
#define QCOM_ID_IPQ6010 422
#define QCOM_ID_SC7180 425
#define QCOM_ID_SM6350 434
+#define QCOM_ID_QCM2150 436
+#define QCOM_ID_SDA429W 437
#define QCOM_ID_SM8350 439
#define QCOM_ID_SM6115 444
#define QCOM_ID_SC8280XP 449
@@ -155,6 +235,8 @@
#define QCOM_ID_QRU1032 588
#define QCOM_ID_QRU1052 589
#define QCOM_ID_QRU1062 590
+#define QCOM_ID_IPQ5332 592
+#define QCOM_ID_IPQ5322 593
/*
* The board type and revision information, used by Qualcomm bootloaders and
@@ -165,6 +247,7 @@
#define QCOM_BOARD_ID_MTP 8
#define QCOM_BOARD_ID_DRAGONBOARD 10
+#define QCOM_BOARD_ID_QRD 11
#define QCOM_BOARD_ID_SBC 24
#endif /* _DT_BINDINGS_ARM_QCOM_IDS_H */
diff --git a/include/dt-bindings/firmware/qcom,scm.h b/include/dt-bindings/firmware/qcom,scm.h
new file mode 100644
index 000000000000..1a4e68fa0744
--- /dev/null
+++ b/include/dt-bindings/firmware/qcom,scm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+#define _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+
+#define QCOM_SCM_VMID_HLOS 0x3
+#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_WLAN 0x18
+#define QCOM_SCM_VMID_WLAN_CE 0x19
+#define QCOM_SCM_VMID_NAV 0x2B
+
+#endif
diff --git a/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
new file mode 100644
index 000000000000..23cfb57256d6
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+#define _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+
+#define PD_CPU 0
+#define PD_VE 1
+#define PD_DSP 2
+
+#endif /* _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_ */
diff --git a/include/dt-bindings/power/mediatek,mt8188-power.h b/include/dt-bindings/power/mediatek,mt8188-power.h
new file mode 100644
index 000000000000..57e75cf3aa2c
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8188-power.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8188_POWER_H
+#define _DT_BINDINGS_POWER_MT8188_POWER_H
+
+#define MT8188_POWER_DOMAIN_MFG0 0
+#define MT8188_POWER_DOMAIN_MFG1 1
+#define MT8188_POWER_DOMAIN_MFG2 2
+#define MT8188_POWER_DOMAIN_MFG3 3
+#define MT8188_POWER_DOMAIN_MFG4 4
+#define MT8188_POWER_DOMAIN_PEXTP_MAC_P0 5
+#define MT8188_POWER_DOMAIN_PEXTP_PHY_TOP 6
+#define MT8188_POWER_DOMAIN_CSIRX_TOP 7
+#define MT8188_POWER_DOMAIN_ETHER 8
+#define MT8188_POWER_DOMAIN_HDMI_TX 9
+#define MT8188_POWER_DOMAIN_ADSP_AO 10
+#define MT8188_POWER_DOMAIN_ADSP_INFRA 11
+#define MT8188_POWER_DOMAIN_ADSP 12
+#define MT8188_POWER_DOMAIN_AUDIO 13
+#define MT8188_POWER_DOMAIN_AUDIO_ASRC 14
+#define MT8188_POWER_DOMAIN_VPPSYS0 15
+#define MT8188_POWER_DOMAIN_VDOSYS0 16
+#define MT8188_POWER_DOMAIN_VDOSYS1 17
+#define MT8188_POWER_DOMAIN_DP_TX 18
+#define MT8188_POWER_DOMAIN_EDP_TX 19
+#define MT8188_POWER_DOMAIN_VPPSYS1 20
+#define MT8188_POWER_DOMAIN_WPE 21
+#define MT8188_POWER_DOMAIN_VDEC0 22
+#define MT8188_POWER_DOMAIN_VDEC1 23
+#define MT8188_POWER_DOMAIN_VENC 24
+#define MT8188_POWER_DOMAIN_IMG_VCORE 25
+#define MT8188_POWER_DOMAIN_IMG_MAIN 26
+#define MT8188_POWER_DOMAIN_DIP 27
+#define MT8188_POWER_DOMAIN_IPE 28
+#define MT8188_POWER_DOMAIN_CAM_VCORE 29
+#define MT8188_POWER_DOMAIN_CAM_MAIN 30
+#define MT8188_POWER_DOMAIN_CAM_SUBA 31
+#define MT8188_POWER_DOMAIN_CAM_SUBB 32
+
+#endif /* _DT_BINDINGS_POWER_MT8188_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 278de6df425e..1bf8e87ecd7e 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -4,6 +4,25 @@
#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
+/* SA8775P Power Domain Indexes */
+#define SA8775P_CX 0
+#define SA8775P_CX_AO 1
+#define SA8775P_DDR 2
+#define SA8775P_EBI 3
+#define SA8775P_GFX 4
+#define SA8775P_LCX 5
+#define SA8775P_LMX 6
+#define SA8775P_MMCX 7
+#define SA8775P_MMCX_AO 8
+#define SA8775P_MSS 9
+#define SA8775P_MX 10
+#define SA8775P_MX_AO 11
+#define SA8775P_MXC 12
+#define SA8775P_MXC_AO 13
+#define SA8775P_NSP0 14
+#define SA8775P_NSP1 15
+#define SA8775P_XO 16
+
/* SDM670 Power Domain Indexes */
#define SDM670_MX 0
#define SDM670_MX_AO 1
@@ -192,6 +211,7 @@
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
#define RPMH_REGULATOR_LEVEL_SVS 128
#define RPMH_REGULATOR_LEVEL_SVS_L0 144
#define RPMH_REGULATOR_LEVEL_SVS_L1 192
@@ -307,16 +327,6 @@
#define SDM660_SSCMX 8
#define SDM660_SSCMX_VFL 9
-/* SM4250 Power Domains */
-#define SM4250_VDDCX 0
-#define SM4250_VDDCX_AO 1
-#define SM4250_VDDCX_VFL 2
-#define SM4250_VDDMX 3
-#define SM4250_VDDMX_AO 4
-#define SM4250_VDDMX_VFL 5
-#define SM4250_VDD_LPI_CX 6
-#define SM4250_VDD_LPI_MX 7
-
/* SM6115 Power Domains */
#define SM6115_VDDCX 0
#define SM6115_VDDCX_AO 1
diff --git a/include/dt-bindings/power/r8a779g0-sysc.h b/include/dt-bindings/power/r8a779g0-sysc.h
index 7daa70f1814e..c7b139fb075f 100644
--- a/include/dt-bindings/power/r8a779g0-sysc.h
+++ b/include/dt-bindings/power/r8a779g0-sysc.h
@@ -38,6 +38,7 @@
#define R8A779G0_PD_A3VIP2 58
#define R8A779G0_PD_A3ISP0 60
#define R8A779G0_PD_A3ISP1 61
+#define R8A779G0_PD_A3DUL 62
/* Always-on power area */
#define R8A779G0_PD_ALWAYS_ON 64
diff --git a/include/dt-bindings/power/starfive,jh7110-pmu.h b/include/dt-bindings/power/starfive,jh7110-pmu.h
new file mode 100644
index 000000000000..132bfe401fc8
--- /dev/null
+++ b/include/dt-bindings/power/starfive,jh7110-pmu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ * Author: Walker Chen <walker.chen@starfivetech.com>
+ */
+#ifndef __DT_BINDINGS_POWER_JH7110_POWER_H__
+#define __DT_BINDINGS_POWER_JH7110_POWER_H__
+
+#define JH7110_PD_SYSTOP 0
+#define JH7110_PD_CPU 1
+#define JH7110_PD_GPUA 2
+#define JH7110_PD_VDEC 3
+#define JH7110_PD_VOUT 4
+#define JH7110_PD_ISP 5
+#define JH7110_PD_VENC 6
+
+#endif
diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h
index 24ab3631dcea..e61660438d61 100644
--- a/include/dt-bindings/reset/mt8195-resets.h
+++ b/include/dt-bindings/reset/mt8195-resets.h
@@ -35,4 +35,49 @@
#define MT8195_INFRA_RST2_PCIE_P1_SWRST 4
#define MT8195_INFRA_RST2_USBSIF_P1_SWRST 5
+/* VDOSYS1 */
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB2 0
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB3 1
+#define MT8195_VDOSYS1_SW0_RST_B_GALS 2
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG0 3
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG1 4
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA0 5
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA1 6
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA2 7
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA3 8
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE0 9
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE1 10
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE2 11
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE3 12
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE4 13
+#define MT8195_VDOSYS1_SW0_RST_B_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8195_VDOSYS1_SW0_RST_B_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MUTEX 16
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA4 17
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA5 18
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA6 19
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA7 20
+#define MT8195_VDOSYS1_SW0_RST_B_DP_INTF0 21
+#define MT8195_VDOSYS1_SW0_RST_B_DPI0 22
+#define MT8195_VDOSYS1_SW0_RST_B_DPI1 23
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MONITOR 24
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE0_DL_ASYNC 25
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE1_DL_ASYNC 26
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE2_DL_ASYNC 27
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE3_DL_ASYNC 28
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC 29
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0 32
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0 33
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE 34
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1 48
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1 49
+#define MT8195_VDOSYS1_SW1_RST_B_DISP_MIXER 50
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0_DL_ASYNC 51
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1_DL_ASYNC 52
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0_DL_ASYNC 53
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1_DL_ASYNC 54
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE_DL_ASYNC 55
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */
diff --git a/include/linux/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index f8335644a01a..1e449a5d7f5c 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
#include <linux/cpumask.h>
+#include <dt-bindings/firmware/qcom,scm.h>
+
#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
@@ -51,10 +53,6 @@ enum qcom_scm_ice_cipher {
QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
};
-#define QCOM_SCM_VMID_HLOS 0x3
-#define QCOM_SCM_VMID_MSS_MSA 0xF
-#define QCOM_SCM_VMID_WLAN 0x18
-#define QCOM_SCM_VMID_WLAN_CE 0x19
#define QCOM_SCM_PERM_READ 0x4
#define QCOM_SCM_PERM_WRITE 0x2
#define QCOM_SCM_PERM_EXEC 0x1
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index b09f443d3ab9..0e4c70987e6a 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -79,6 +79,10 @@
#define EVENT_ERROR_PSM_ERR1 (0x28108000U)
#define EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+/* ZynqMP SD tap delay tuning */
+#define SD_ITAPDLY 0xFF180314
+#define SD_OTAPDLYSEL 0xFF180318
+
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
PM_ACKNOWLEDGE_CB = 31,
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 4f765bc788ff..0ce5746a4470 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -804,11 +804,6 @@ struct scmi_device {
#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
- const char *name);
-void scmi_device_destroy(struct scmi_device *scmi_dev);
-
struct scmi_device_id {
u8 protocol_id;
const char *name;
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index c9cabb679cd1..fc456f75c131 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -22,6 +22,7 @@
* @size: Size of the shared memory buffer.
* @iova: Device VA of shared memory buffer.
* @is_mapped: Shared memory buffer is managed by the co-processor.
+ * @private: Private data pointer for the parent driver.
*/
struct apple_rtkit_shmem {
@@ -30,6 +31,7 @@ struct apple_rtkit_shmem {
size_t size;
dma_addr_t iova;
bool is_mapped;
+ void *private;
};
/*
@@ -78,6 +80,25 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
const struct apple_rtkit_ops *ops);
/*
+ * Non-devm version of devm_apple_rtkit_init. Must be freed with
+ * apple_rtkit_free.
+ *
+ * @dev: Pointer to the device node this coprocessor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops);
+
+/*
+ * Free an instance of apple_rtkit.
+ */
+void apple_rtkit_free(struct apple_rtkit *rtk);
+
+/*
* Reinitialize internal structures. Must only be called while the co-processor
* is held in reset.
*/
@@ -105,6 +126,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk);
int apple_rtkit_shutdown(struct apple_rtkit *rtk);
/*
+ * Put the co-processor into idle mode
+ */
+int apple_rtkit_idle(struct apple_rtkit *rtk);
+
+/*
* Checks if RTKit is running and ready to handle messages.
*/
bool apple_rtkit_is_running(struct apple_rtkit *rtk);
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index 50804ac748bd..07f67b3d8e97 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -140,6 +140,127 @@
#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13)
#define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_SET 0x2A0
+#define MT8188_TOP_AXI_PROT_EN_CLR 0x2A4
+#define MT8188_TOP_AXI_PROT_EN_STA 0x228
+#define MT8188_TOP_AXI_PROT_EN_1_SET 0x2A8
+#define MT8188_TOP_AXI_PROT_EN_1_CLR 0x2AC
+#define MT8188_TOP_AXI_PROT_EN_1_STA 0x258
+#define MT8188_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8188_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8188_TOP_AXI_PROT_EN_2_STA 0x724
+
+#define MT8188_TOP_AXI_PROT_EN_MM_SET 0x2D4
+#define MT8188_TOP_AXI_PROT_EN_MM_CLR 0x2D8
+#define MT8188_TOP_AXI_PROT_EN_MM_STA 0x2EC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_SET 0xDCC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CLR 0xDD0
+#define MT8188_TOP_AXI_PROT_EN_MM_2_STA 0xDD8
+
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET 0xB84
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR 0xB88
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA 0xB90
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xBCC
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xBD0
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA 0xBD8
+
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP1 BIT(11)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP2 BIT(7)
+#define MT8188_TOP_AXI_PROT_EN_1_MFG1_STEP3 BIT(19)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP4 BIT(5)
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP5 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP6 BIT(17)
+
+#define MT8188_TOP_AXI_PROT_EN_PEXTP_MAC_P0_STEP1 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_PEXTP_MAC_P0_STEP2 (BIT(8) | BIT(18) | BIT(30))
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_ETHER_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_HDMI_TX_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP1 GENMASK(31, 29)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP2 (GENMASK(4, 3) | BIT(28))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP1 (GENMASK(16, 14) | BIT(23) | \
+ BIT(27))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP2 (GENMASK(19, 17) | GENMASK(26, 25))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP1 GENMASK(11, 8)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP2 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP2 BIT(12)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP2 BIT(13)
+
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP1 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP2 GENMASK(9, 8)
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP3 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP4 (BIT(1) | BIT(4) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0_STEP5 (BIT(20))
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS0_STEP1 (GENMASK(18, 17) | GENMASK(21, 20))
+#define MT8188_TOP_AXI_PROT_EN_VDOSYS0_STEP2 BIT(6)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0_STEP3 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP2 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDOSYS1_STEP3 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_DP_TX_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_EDP_TX_STEP1 BIT(22)
+
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP1 GENMASK(6, 5)
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP2 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS1_STEP3 BIT(18)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP2 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC0_STEP1 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP1 BIT(14)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP2 BIT(29)
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP1 (BIT(9) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP2 BIT(26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VENC_STEP3 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP1 (BIT(1) | BIT(3))
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP2 BIT(25)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_VCORE_STEP3 BIT(16)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP1 GENMASK(27, 26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP2 GENMASK(25, 24)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP1 (BIT(2) | BIT(4))
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_VCORE_STEP2 BIT(0)
+#define MT8188_TOP_AXI_PROT_EN_1_CAM_VCORE_STEP3 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP4 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_VCORE_STEP5 BIT(17)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP2 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP3 GENMASK(29, 28)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP4 BIT(1)
+
+#define MT8188_SMI_COMMON_CLAMP_EN_STA 0x3C0
+#define MT8188_SMI_COMMON_CLAMP_EN_SET 0x3C4
+#define MT8188_SMI_COMMON_CLAMP_EN_CLR 0x3C8
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VDO0 GENMASK(3, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VPP1 GENMASK(2, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_IPE_TO_VPP1 BIT(0)
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBA_TO_VPP0 GENMASK(3, 2)
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBB_TO_VDO0 GENMASK(3, 2)
+
+#define MT8188_SMI_LARB10_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11A_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11C_RESET_ADDR 0xC
+#define MT8188_SMI_LARB12_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11B_RESET_ADDR 0xC
+#define MT8188_SMI_LARB15_RESET_ADDR 0xC
+#define MT8188_SMI_LARB16B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB16A_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17A_RESET_ADDR 0xA0
+
+#define MT8188_SMI_LARB10_RESET BIT(0)
+#define MT8188_SMI_LARB11A_RESET BIT(0)
+#define MT8188_SMI_LARB11C_RESET BIT(0)
+#define MT8188_SMI_LARB12_RESET BIT(8)
+#define MT8188_SMI_LARB11B_RESET BIT(0)
+#define MT8188_SMI_LARB15_RESET BIT(0)
+#define MT8188_SMI_LARB16B_RESET BIT(4)
+#define MT8188_SMI_LARB17B_RESET BIT(4)
+#define MT8188_SMI_LARB16A_RESET BIT(4)
+#define MT8188_SMI_LARB17A_RESET BIT(4)
+
#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0)
#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4)
#define MT8186_TOP_AXI_PROT_EN_STA (0x228)
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index b85f66db33e1..dc2963a0a0f7 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -6,6 +6,10 @@
#ifndef __MTK_MMSYS_H
#define __MTK_MMSYS_H
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
enum mtk_ddp_comp_id;
struct device;
@@ -36,7 +40,16 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_DSI1,
DDP_COMPONENT_DSI2,
DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_ETHDR_MIXER,
DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_MDP_RDMA0,
+ DDP_COMPONENT_MDP_RDMA1,
+ DDP_COMPONENT_MDP_RDMA2,
+ DDP_COMPONENT_MDP_RDMA3,
+ DDP_COMPONENT_MDP_RDMA4,
+ DDP_COMPONENT_MDP_RDMA5,
+ DDP_COMPONENT_MDP_RDMA6,
+ DDP_COMPONENT_MDP_RDMA7,
DDP_COMPONENT_MERGE0,
DDP_COMPONENT_MERGE1,
DDP_COMPONENT_MERGE2,
@@ -74,4 +87,16 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val);
+void mtk_mmsys_merge_async_config(struct device *dev, int idx, int width,
+ int height, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_hdr_config(struct device *dev, int be_width, int be_height,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 alpha,
+ u8 mode, u32 biwidth, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_channel_swap(struct device *dev, int idx, bool channel_swap,
+ struct cmdq_pkt *cmdq_pkt);
+
#endif /* __MTK_MMSYS_H */
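A minimal sketch of how a display driver might drive the new mixer helpers, assuming a hypothetical mmsys device and cmdq packet obtained elsewhere; my_setup_mixer_input() and all parameter values are illustrative only.

/*
 * Illustrative sketch, not part of this patch: program one ETHDR mixer
 * input through the new mmsys helpers.
 */
#include <linux/soc/mediatek/mtk-mmsys.h>

static void my_setup_mixer_input(struct device *mmsys_dev, int idx,
				 int width, int height,
				 struct cmdq_pkt *pkt)
{
	/* size the async merge path feeding the mixer */
	mtk_mmsys_merge_async_config(mmsys_dev, idx, width, height, pkt);

	/* per-input alpha/mode/width programming (values are placeholders) */
	mtk_mmsys_mixer_in_config(mmsys_dev, idx, true, 0xff, 0, width, pkt);

	/* leave the channel order unchanged for this input */
	mtk_mmsys_mixer_in_channel_swap(mmsys_dev, idx, false, pkt);
}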
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index 23c5b30f3511..be98aebcb3e1 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -153,7 +153,7 @@ typedef struct apr_device gpr_device_t;
struct apr_driver {
int (*probe)(struct apr_device *sl);
- int (*remove)(struct apr_device *sl);
+ void (*remove)(struct apr_device *sl);
int (*callback)(struct apr_device *a,
struct apr_resp_pkt *d);
int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op);
diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h
new file mode 100644
index 000000000000..fd124aa18c81
--- /dev/null
+++ b/include/linux/soc/qcom/pmic_glink.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#ifndef __SOC_QCOM_PMIC_GLINK_H__
+#define __SOC_QCOM_PMIC_GLINK_H__
+
+struct pmic_glink;
+struct pmic_glink_client;
+
+#define PMIC_GLINK_OWNER_BATTMGR 32778
+#define PMIC_GLINK_OWNER_USBC 32779
+#define PMIC_GLINK_OWNER_USBC_PAN 32780
+
+#define PMIC_GLINK_REQ_RESP 1
+#define PMIC_GLINK_NOTIFY 2
+
+struct pmic_glink_hdr {
+ __le32 owner;
+ __le32 type;
+ __le32 opcode;
+};
+
+int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len);
+
+struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv);
+
+#endif
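A minimal client sketch against the new pmic_glink interface, assuming a hypothetical battery-manager user: my_request, my_callback, my_pdr_notify and the opcode value are illustrative, and only the header layout, the owner/type constants and the two exported functions come from this file.

/*
 * Illustrative sketch only, not part of this patch.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/soc/qcom/pmic_glink.h>

struct my_request {
	struct pmic_glink_hdr hdr;
	__le32 battery_id;
};

static void my_callback(const void *data, size_t len, void *priv)
{
	/* messages start with a struct pmic_glink_hdr; parse it here */
}

static void my_pdr_notify(void *priv, int state)
{
	/* protection-domain state changed; re-sync client state if needed */
}

static int my_client_init(struct device *dev)
{
	struct pmic_glink_client *client;
	struct my_request req = {
		.hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
		.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
		.hdr.opcode = cpu_to_le32(0x1),	/* hypothetical opcode */
		.battery_id = cpu_to_le32(0),
	};

	client = devm_pmic_glink_register_client(dev, PMIC_GLINK_OWNER_BATTMGR,
						 my_callback, my_pdr_notify,
						 NULL);
	if (IS_ERR(client))
		return PTR_ERR(client);

	return pmic_glink_send(client, &req, sizeof(req));
}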
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index 862eff613dc7..b8fa30fd6b50 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -65,41 +65,15 @@ enum rpcif_type {
struct rpcif {
struct device *dev;
- void __iomem *base;
void __iomem *dirmap;
- struct regmap *regmap;
- struct reset_control *rstc;
size_t size;
- enum rpcif_type type;
- enum rpcif_data_dir dir;
- u8 bus_size;
- u8 xfer_size;
- void *buffer;
- u32 xferlen;
- u32 smcr;
- u32 smadr;
- u32 command; /* DRCMR or SMCMR */
- u32 option; /* DROPR or SMOPR */
- u32 enable; /* DRENR or SMENR */
- u32 dummy; /* DRDMCR or SMDMCR */
- u32 ddr; /* DRDRENR or SMDRENR */
};
int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
-int rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
-void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+int rpcif_hw_init(struct device *dev, bool hyperflash);
+void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
size_t *len);
-int rpcif_manual_xfer(struct rpcif *rpc);
-ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf);
-
-static inline void rpcif_enable_rpm(struct rpcif *rpc)
-{
- pm_runtime_enable(rpc->dev);
-}
-
-static inline void rpcif_disable_rpm(struct rpcif *rpc)
-{
- pm_runtime_disable(rpc->dev);
-}
+int rpcif_manual_xfer(struct device *dev);
+ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf);
#endif // __RENESAS_RPC_IF_H
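A minimal sketch of the caller-side pattern after this interface change, assuming a hypothetical flash-controller user; my_xfer() and my_probe() are illustrative names. The hardware-facing helpers now take the struct device rather than struct rpcif, and runtime PM is handled directly by the caller since rpcif_enable_rpm()/rpcif_disable_rpm() are gone.

/*
 * Illustrative sketch only, not part of this patch.
 */
#include <linux/pm_runtime.h>
#include <memory/renesas-rpc-if.h>

static int my_xfer(struct rpcif *rpc, const struct rpcif_op *op)
{
	u64 offs = 0;
	size_t len = 0;

	/* hardware-facing helpers now take the device, not struct rpcif */
	rpcif_prepare(rpc->dev, op, &offs, &len);
	return rpcif_manual_xfer(rpc->dev);
}

static int my_probe(struct rpcif *rpc, struct device *dev)
{
	int ret;

	ret = rpcif_sw_init(rpc, dev);
	if (ret)
		return ret;

	/* the removed rpcif_enable_rpm() helper becomes a direct runtime-PM call */
	pm_runtime_enable(rpc->dev);

	return rpcif_hw_init(rpc->dev, false);
}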
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f160d68f961d..422c1ad9484d 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -139,11 +139,15 @@ TRACE_EVENT(scmi_rx_done,
);
TRACE_EVENT(scmi_msg_dump,
- TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq,
- int status, void *buf, size_t len),
- TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len),
+ TP_PROTO(int id, u8 channel_id, u8 protocol_id, u8 msg_id,
+ unsigned char *tag, u16 seq, int status,
+ void *buf, size_t len),
+ TP_ARGS(id, channel_id, protocol_id, msg_id, tag, seq, status,
+ buf, len),
TP_STRUCT__entry(
+ __field(int, id)
+ __field(u8, channel_id)
__field(u8, protocol_id)
__field(u8, msg_id)
__array(char, tag, 5)
@@ -154,6 +158,8 @@ TRACE_EVENT(scmi_msg_dump,
),
TP_fast_assign(
+ __entry->id = id;
+ __entry->channel_id = channel_id;
__entry->protocol_id = protocol_id;
__entry->msg_id = msg_id;
strscpy(__entry->tag, tag, 5);
@@ -163,9 +169,9 @@ TRACE_EVENT(scmi_msg_dump,
memcpy(__get_dynamic_array(cmd), buf, __entry->len);
),
- TP_printk("pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
- __entry->protocol_id, __entry->tag, __entry->msg_id,
- __entry->seq, __entry->status,
+ TP_printk("id=%d ch=%02X pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s",
+ __entry->id, __entry->channel_id, __entry->protocol_id,
+ __entry->tag, __entry->msg_id, __entry->seq, __entry->status,
__print_hex_str(__get_dynamic_array(cmd), __entry->len))
);
#endif /* _TRACE_SCMI_H */
diff --git a/sound/soc/qcom/qdsp6/q6core.c b/sound/soc/qcom/qdsp6/q6core.c
index 5358fefd4210..49cfb32cd209 100644
--- a/sound/soc/qcom/qdsp6/q6core.c
+++ b/sound/soc/qcom/qdsp6/q6core.c
@@ -339,7 +339,7 @@ static int q6core_probe(struct apr_device *adev)
return 0;
}
-static int q6core_exit(struct apr_device *adev)
+static void q6core_exit(struct apr_device *adev)
{
struct q6core *core = dev_get_drvdata(&adev->dev);
@@ -350,8 +350,6 @@ static int q6core_exit(struct apr_device *adev)
g_core = NULL;
kfree(core);
-
- return 0;
}
#ifdef CONFIG_OF