author     Linus Torvalds <torvalds@linux-foundation.org>  2021-11-03 17:00:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-11-03 17:00:52 -0700
commit     d461e96cd22b5aeb1df448536b92e8d8e88c4a05 (patch)
tree       ffd7a4ddc858c79a9a30952bedf14652ac10a2e4
parent     ae45d84fc36d01dcb1007f4298871eec37907904 (diff)
parent     6a03568932b2711c91e1572f08867690b52a18df (diff)
Merge tag 'drivers-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "These are all the driver updates for SoC specific drivers. There are
  a couple of subsystems with individual maintainers picking up their
  patches here:

   - The reset controller subsystem adds support for a few new SoC
     variants to existing drivers, along with other minor improvements

   - The OP-TEE subsystem gets a driver for the ARM FF-A transport

   - The memory controller subsystem has improvements for Tegra,
     Mediatek, Renesas, Freescale and Broadcom specific drivers.

   - The tegra cpuidle driver changes get merged through this tree this
     time. There are only minor changes, but they depend on other tegra
     driver updates here.

   - The ep93xx platform finally moves to using the drivers/clk/
     subsystem, moving the code out of arch/arm in the process. This
     depends on a small sound driver change that is included here as
     well.

   - There are some minor updates for Qualcomm and Tegra specific
     firmware drivers.

  The other driver updates are mainly for drivers/soc, which contains a
  mixture of vendor specific drivers that don't really fit elsewhere:

   - Mediatek drivers gain more support for MT8192, with new support
     for hw-mutex and mmsys routing, plus support for reset lines in
     the mmsys driver.

   - Qualcomm gains a new "sleep stats" driver, and support for the
     "Generic Packet Router" in the APR driver.

   - There is a new user interface for routing the UARTS on ASpeed
     BMCs, something that apparently nobody else has needed so far.

   - More drivers can now be built as loadable modules, in particular
     for Broadcom and Samsung platforms.

   - Lots of improvements to the TI sysc driver for better
     suspend/resume support

  Finally, there are lots of minor cleanups and new device IDs for
  amlogic, renesas, tegra, qualcomm, mediatek, samsung, imx,
  layerscape, allwinner, broadcom, and omap"

* tag 'drivers-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (179 commits)
  optee: Fix spelling mistake "reclain" -> "reclaim"
  Revert "firmware: qcom: scm: Add support for MC boot address API"
  qcom: spm: allow compile-testing
  firmware: arm_ffa: Remove unused 'compat_version' variable
  soc: samsung: exynos-chipid: add exynosautov9 SoC support
  firmware: qcom: scm: Don't break compile test on non-ARM platforms
  soc: qcom: smp2p: Add of_node_put() before goto
  soc: qcom: apr: Add of_node_put() before return
  soc: qcom: qcom_stats: Fix client votes offset
  soc: qcom: rpmhpd: fix sm8350_mxc's peer domain
  dt-bindings: arm: cpus: Document qcom,msm8916-smp enable-method
  ARM: qcom: Add qcom,msm8916-smp enable-method identical to MSM8226
  firmware: qcom: scm: Add support for MC boot address API
  soc: qcom: spm: Add 8916 SPM register data
  dt-bindings: soc: qcom: spm: Document qcom,msm8916-saw2-v3.0-cpu
  soc: qcom: socinfo: Add PM8150C and SMB2351 models
  firmware: qcom_scm: Fix error retval in __qcom_scm_is_call_available()
  soc: aspeed: Add UART routing support
  soc: fsl: dpio: rename the enqueue descriptor variable
  soc: fsl: dpio: use an explicit NULL instead of 0
  ...
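As a rough orientation for the ep93xx clock conversion in the diff below, here is a minimal consumer-side sketch of the common clock framework API that the platform moves to. The driver name "example-clk-consumer" and the "hclk" connection id are illustrative only, not taken from the patch; the provider side of the same framework is what arch/arm/mach-ep93xx/clock.c is rewritten around (clk_hw, clk_ops, clk_register()).

  /*
   * Minimal consumer-side sketch of the common clock framework (CCF) API.
   * The driver name and the "hclk" connection id are illustrative only.
   */
  #include <linux/clk.h>
  #include <linux/err.h>
  #include <linux/module.h>
  #include <linux/platform_device.h>

  static int example_probe(struct platform_device *pdev)
  {
          struct clk *clk;
          int ret;

          /* Look up the clock by connection id; the reference is device-managed. */
          clk = devm_clk_get(&pdev->dev, "hclk");
          if (IS_ERR(clk))
                  return PTR_ERR(clk);

          /* Prepare and enable in one step; pairs with clk_disable_unprepare(). */
          ret = clk_prepare_enable(clk);
          if (ret)
                  return ret;

          dev_info(&pdev->dev, "clock running at %lu Hz\n", clk_get_rate(clk));
          return 0;
  }

  static struct platform_driver example_driver = {
          .driver = { .name = "example-clk-consumer" },
          .probe = example_probe,
  };
  module_platform_driver(example_driver);
  MODULE_LICENSE("GPL");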
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing | 27
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/ddr/lpddr2.txt | 102
-rw-r--r--  Documentation/devicetree/bindings/display/msm/dp-controller.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/firmware/qcom,scm.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml | 223
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ddr/lpddr2-timings.txt (renamed from Documentation/devicetree/bindings/ddr/lpddr2-timings.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3-timings.txt (renamed from Documentation/devicetree/bindings/ddr/lpddr3-timings.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3.txt (renamed from Documentation/devicetree/bindings/ddr/lpddr3.txt) | 5
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml | 34
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml | 23
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/power/qcom,rpmpd.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/reset/microchip,rst.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml | 34
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml | 81
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml | 47
-rw-r--r--  Documentation/devicetree/bindings/sram/sram.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/mach-ep93xx/clock.c | 975
-rw-r--r--  arch/arm/mach-ep93xx/core.c | 2
-rw-r--r--  arch/arm/mach-ep93xx/soc.h | 42
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 2
-rw-r--r--  arch/arm/mach-qcom/platsmp.c | 72
-rw-r--r--  arch/arm/mach-s5pv210/Kconfig | 1
-rw-r--r--  arch/arm64/Kconfig.platforms | 2
-rw-r--r--  drivers/bus/Kconfig | 2
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 7
-rw-r--r--  drivers/bus/sun50i-de2.c | 7
-rw-r--r--  drivers/bus/ti-sysc.c | 276
-rw-r--r--  drivers/cpuidle/Kconfig.arm | 3
-rw-r--r--  drivers/cpuidle/cpuidle-qcom-spm.c | 318
-rw-r--r--  drivers/cpuidle/cpuidle-tegra.c | 3
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 53
-rw-r--r--  drivers/firmware/qcom_scm.c | 6
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 26
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra210.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 5
-rw-r--r--  drivers/memory/Kconfig | 5
-rw-r--r--  drivers/memory/fsl_ifc.c | 13
-rw-r--r--  drivers/memory/jedec_ddr.h | 47
-rw-r--r--  drivers/memory/jedec_ddr_data.c | 41
-rw-r--r--  drivers/memory/mtk-smi.c | 596
-rw-r--r--  drivers/memory/of_memory.c | 87
-rw-r--r--  drivers/memory/of_memory.h | 9
-rw-r--r--  drivers/memory/renesas-rpc-if.c | 159
-rw-r--r--  drivers/memory/samsung/Kconfig | 13
-rw-r--r--  drivers/memory/tegra/Kconfig | 1
-rw-r--r--  drivers/memory/tegra/mc.c | 25
-rw-r--r--  drivers/memory/tegra/tegra186-emc.c | 5
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c | 200
-rw-r--r--  drivers/memory/tegra/tegra210-emc-cc-r21021.c | 2
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c | 6
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c | 4
-rw-r--r--  drivers/of/platform.c | 1
-rw-r--r--  drivers/reset/Kconfig | 4
-rw-r--r--  drivers/reset/reset-microchip-sparx5.c | 40
-rw-r--r--  drivers/reset/reset-uniphier-glue.c | 4
-rw-r--r--  drivers/reset/reset-uniphier.c | 27
-rw-r--r--  drivers/rtc/Kconfig | 10
-rw-r--r--  drivers/soc/amlogic/meson-canvas.c | 4
-rw-r--r--  drivers/soc/amlogic/meson-clk-measure.c | 4
-rw-r--r--  drivers/soc/amlogic/meson-gx-socinfo.c | 1
-rw-r--r--  drivers/soc/aspeed/Kconfig | 10
-rw-r--r--  drivers/soc/aspeed/Makefile | 9
-rw-r--r--  drivers/soc/aspeed/aspeed-uart-routing.c | 603
-rw-r--r--  drivers/soc/bcm/bcm63xx/bcm-pmb.c | 4
-rw-r--r--  drivers/soc/bcm/bcm63xx/bcm63xx-power.c | 4
-rw-r--r--  drivers/soc/bcm/brcmstb/biuctrl.c | 2
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c | 2
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c | 8
-rw-r--r--  drivers/soc/fsl/guts.c | 4
-rw-r--r--  drivers/soc/fsl/rcpm.c | 7
-rw-r--r--  drivers/soc/imx/Kconfig | 1
-rw-r--r--  drivers/soc/imx/Makefile | 1
-rw-r--r--  drivers/soc/imx/gpcv2.c | 134
-rw-r--r--  drivers/soc/imx/imx8m-blk-ctrl.c | 523
-rw-r--r--  drivers/soc/mediatek/mt8192-mmsys.h | 76
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.c | 79
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.h | 2
-rw-r--r--  drivers/soc/mediatek/mtk-mutex.c | 35
-rw-r--r--  drivers/soc/qcom/Kconfig | 19
-rw-r--r--  drivers/soc/qcom/Makefile | 2
-rw-r--r--  drivers/soc/qcom/apr.c | 2
-rw-r--r--  drivers/soc/qcom/cpr.c | 4
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c | 18
-rw-r--r--  drivers/soc/qcom/ocmem.c | 4
-rw-r--r--  drivers/soc/qcom/pdr_interface.c | 12
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c | 4
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c | 165
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c | 4
-rw-r--r--  drivers/soc/qcom/qcom_stats.c | 277
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c | 4
-rw-r--r--  drivers/soc/qcom/rpmhpd.c | 36
-rw-r--r--  drivers/soc/qcom/rpmpd.c | 24
-rw-r--r--  drivers/soc/qcom/smd-rpm.c | 2
-rw-r--r--  drivers/soc/qcom/smem.c | 57
-rw-r--r--  drivers/soc/qcom/smp2p.c | 154
-rw-r--r--  drivers/soc/qcom/socinfo.c | 18
-rw-r--r--  drivers/soc/qcom/spm.c | 279
-rw-r--r--  drivers/soc/renesas/Kconfig | 7
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 7
-rw-r--r--  drivers/soc/samsung/Kconfig | 5
-rw-r--r--  drivers/soc/samsung/Makefile | 3
-rw-r--r--  drivers/soc/samsung/exynos-chipid.c | 94
-rw-r--r--  drivers/soc/samsung/exynos5422-asv.c | 1
-rw-r--r--  drivers/soc/samsung/pm_domains.c | 1
-rw-r--r--  drivers/soc/sunxi/sunxi_sram.c | 4
-rw-r--r--  drivers/soc/tegra/Makefile | 1
-rw-r--r--  drivers/soc/tegra/ari-tegra186.c | 80
-rw-r--r--  drivers/soc/tegra/pmc.c | 28
-rw-r--r--  drivers/tee/optee/Makefile | 5
-rw-r--r--  drivers/tee/optee/call.c | 445
-rw-r--r--  drivers/tee/optee/core.c | 719
-rw-r--r--  drivers/tee/optee/ffa_abi.c | 911
-rw-r--r--  drivers/tee/optee/optee_ffa.h | 153
-rw-r--r--  drivers/tee/optee/optee_msg.h | 27
-rw-r--r--  drivers/tee/optee/optee_private.h | 157
-rw-r--r--  drivers/tee/optee/rpc.c | 237
-rw-r--r--  drivers/tee/optee/shm_pool.c | 101
-rw-r--r--  drivers/tee/optee/shm_pool.h | 14
-rw-r--r--  drivers/tee/optee/smc_abi.c | 1362
-rw-r--r--  include/dt-bindings/power/qcom-aoss-qmp.h | 14
-rw-r--r--  include/dt-bindings/power/qcom-rpmpd.h | 17
-rw-r--r--  include/linux/arm_ffa.h | 2
-rw-r--r--  include/linux/clk/tegra.h | 24
-rw-r--r--  include/linux/platform_data/ti-sysc.h | 3
-rw-r--r--  include/linux/soc/mediatek/mtk-mmsys.h | 3
-rw-r--r--  include/linux/soc/qcom/qcom_aoss.h | 38
-rw-r--r--  include/linux/soc/samsung/exynos-chipid.h | 6
-rw-r--r--  include/linux/tee_drv.h | 7
-rw-r--r--  include/memory/renesas-rpc-if.h | 1
-rw-r--r--  include/soc/qcom/spm.h | 43
-rw-r--r--  include/soc/tegra/fuse.h | 31
-rw-r--r--  include/soc/tegra/irq.h | 9
-rw-r--r--  include/soc/tegra/pm.h | 2
-rw-r--r--  sound/soc/cirrus/ep93xx-i2s.c | 12
146 files changed, 7959 insertions, 2963 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing b/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
new file mode 100644
index 000000000000..b363827da437
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
@@ -0,0 +1,27 @@
+What: /sys/bus/platform/drivers/aspeed-uart-routing/*/uart*
+Date: September 2021
+Contact: Oskar Senft <osk@google.com>
+ Chia-Wei Wang <chiawei_wang@aspeedtech.com>
+Description: Selects the RX source of the UARTx device.
+
+ When read, each file shows the list of available options with currently
+ selected option marked by brackets "[]". The list of available options
+ depends on the selected file.
+
+ e.g.
+ cat /sys/bus/platform/drivers/aspeed-uart-routing/*.uart_routing/uart1
+ [io1] io2 io3 io4 uart2 uart3 uart4 io6
+
+ In this case, UART1 gets its input from IO1 (physical serial port 1).
+
+Users: OpenBMC. Proposed changes should be mailed to
+ openbmc@lists.ozlabs.org
+
+What: /sys/bus/platform/drivers/aspeed-uart-routing/*/io*
+Date: September 2021
+Contact: Oskar Senft <osk@google.com>
+ Chia-Wei Wang <chiawei_wang@aspeedtech.com>
+Description: Selects the RX source of IOx serial port. The current selection
+ will be marked by brackets "[]".
+Users: OpenBMC. Proposed changes should be mailed to
+ openbmc@lists.ozlabs.org
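
The attributes described above appear to follow the usual sysfs selector pattern: reading a file lists the available tokens with the current one in brackets, and writing one of the listed tokens selects it (this is an assumption based on the description, not stated explicitly in the file). A small user-space sketch; the device instance name "1e789098.uart_routing" and the chosen token "uart2" are placeholders:

  /* Hypothetical helper: feed UART1's RX from UART2 on an Aspeed BMC. */
  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  static int select_route(const char *attr, const char *token)
  {
          int fd = open(attr, O_WRONLY);

          if (fd < 0)
                  return -errno;
          if (write(fd, token, strlen(token)) < 0) {
                  close(fd);
                  return -errno;
          }
          return close(fd);
  }

  int main(void)
  {
          /* The "1e789098.uart_routing" instance name is a placeholder. */
          const char *uart1 = "/sys/bus/platform/drivers/aspeed-uart-routing/"
                              "1e789098.uart_routing/uart1";

          if (select_route(uart1, "uart2")) {
                  fprintf(stderr, "failed to select route\n");
                  return 1;
          }
          return 0;
  }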
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 49bbaf1a3ea6..f2ab6423b4af 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -211,6 +211,9 @@ properties:
- qcom,gcc-msm8660
- qcom,kpss-acc-v1
- qcom,kpss-acc-v2
+ - qcom,msm8226-smp
+ # Only valid on ARM 32-bit, see above for ARM v8 64-bit
+ - qcom,msm8916-smp
- renesas,apmu
- renesas,r9a06g032-smp
- rockchip,rk3036-smp
@@ -297,7 +300,8 @@ properties:
Specifies the ACC* node associated with this CPU.
Required for systems that have an "enable-method" property
- value of "qcom,kpss-acc-v1" or "qcom,kpss-acc-v2"
+ value of "qcom,kpss-acc-v1", "qcom,kpss-acc-v2", "qcom,msm8226-smp" or
+ "qcom,msm8916-smp".
* arm/msm/qcom,kpss-acc.txt
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
index f99c0c6df21b..bfc352a2fdd6 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
@@ -11,8 +11,9 @@ maintainers:
properties:
compatible:
- items:
- - const: samsung,exynos4210-chipid
+ enum:
+ - samsung,exynos4210-chipid
+ - samsung,exynos850-chipid
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/ddr/lpddr2.txt b/Documentation/devicetree/bindings/ddr/lpddr2.txt
deleted file mode 100644
index ddd40121e6f6..000000000000
--- a/Documentation/devicetree/bindings/ddr/lpddr2.txt
+++ /dev/null
@@ -1,102 +0,0 @@
-* LPDDR2 SDRAM memories compliant to JEDEC JESD209-2
-
-Required properties:
-- compatible : Should be one of - "jedec,lpddr2-nvm", "jedec,lpddr2-s2",
- "jedec,lpddr2-s4"
-
- "ti,jedec-lpddr2-s2" should be listed if the memory part is LPDDR2-S2 type
-
- "ti,jedec-lpddr2-s4" should be listed if the memory part is LPDDR2-S4 type
-
- "ti,jedec-lpddr2-nvm" should be listed if the memory part is LPDDR2-NVM type
-
-- density : <u32> representing density in Mb (Mega bits)
-
-- io-width : <u32> representing bus width. Possible values are 8, 16, and 32
-
-Optional properties:
-
-The following optional properties represent the minimum value of some AC
-timing parameters of the DDR device in terms of number of clock cycles.
-These values shall be obtained from the device data-sheet.
-- tRRD-min-tck
-- tWTR-min-tck
-- tXP-min-tck
-- tRTP-min-tck
-- tCKE-min-tck
-- tRPab-min-tck
-- tRCD-min-tck
-- tWR-min-tck
-- tRASmin-min-tck
-- tCKESR-min-tck
-- tFAW-min-tck
-
-Child nodes:
-- The lpddr2 node may have one or more child nodes of type "lpddr2-timings".
- "lpddr2-timings" provides AC timing parameters of the device for
- a given speed-bin. The user may provide the timings for as many
- speed-bins as is required. Please see Documentation/devicetree/
- bindings/ddr/lpddr2-timings.txt for more information on "lpddr2-timings"
-
-Example:
-
-elpida_ECB240ABACN : lpddr2 {
- compatible = "Elpida,ECB240ABACN","jedec,lpddr2-s4";
- density = <2048>;
- io-width = <32>;
-
- tRPab-min-tck = <3>;
- tRCD-min-tck = <3>;
- tWR-min-tck = <3>;
- tRASmin-min-tck = <3>;
- tRRD-min-tck = <2>;
- tWTR-min-tck = <2>;
- tXP-min-tck = <2>;
- tRTP-min-tck = <2>;
- tCKE-min-tck = <3>;
- tCKESR-min-tck = <3>;
- tFAW-min-tck = <8>;
-
- timings_elpida_ECB240ABACN_400mhz: lpddr2-timings@0 {
- compatible = "jedec,lpddr2-timings";
- min-freq = <10000000>;
- max-freq = <400000000>;
- tRPab = <21000>;
- tRCD = <18000>;
- tWR = <15000>;
- tRAS-min = <42000>;
- tRRD = <10000>;
- tWTR = <7500>;
- tXP = <7500>;
- tRTP = <7500>;
- tCKESR = <15000>;
- tDQSCK-max = <5500>;
- tFAW = <50000>;
- tZQCS = <90000>;
- tZQCL = <360000>;
- tZQinit = <1000000>;
- tRAS-max-ns = <70000>;
- };
-
- timings_elpida_ECB240ABACN_200mhz: lpddr2-timings@1 {
- compatible = "jedec,lpddr2-timings";
- min-freq = <10000000>;
- max-freq = <200000000>;
- tRPab = <21000>;
- tRCD = <18000>;
- tWR = <15000>;
- tRAS-min = <42000>;
- tRRD = <10000>;
- tWTR = <10000>;
- tXP = <7500>;
- tRTP = <7500>;
- tCKESR = <15000>;
- tDQSCK-max = <5500>;
- tFAW = <50000>;
- tZQCS = <90000>;
- tZQCL = <360000>;
- tZQinit = <1000000>;
- tRAS-max-ns = <70000>;
- };
-
-}
diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
index b36d74c1da7c..63e585f48789 100644
--- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
@@ -102,7 +102,6 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,dispcc-sc7180.h>
- #include <dt-bindings/power/qcom-aoss-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
displayport-controller@ae90000 {
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index a7333ad938d2..d7e3cda8924e 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -13,8 +13,10 @@ Required properties:
* "qcom,scm-ipq806x"
* "qcom,scm-ipq8074"
* "qcom,scm-mdm9607"
+ * "qcom,scm-msm8226"
* "qcom,scm-msm8660"
* "qcom,scm-msm8916"
+ * "qcom,scm-msm8953"
* "qcom,scm-msm8960"
* "qcom,scm-msm8974"
* "qcom,scm-msm8994"
@@ -33,7 +35,7 @@ Required properties:
* core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660" and
"qcom,scm-msm8960"
* core, iface and bus clocks required for "qcom,scm-apq8084",
- "qcom,scm-msm8916" and "qcom,scm-msm8974"
+ "qcom,scm-msm8916", "qcom,scm-msm8953" and "qcom,scm-msm8974"
- clock-names: Must contain "core" for the core clock, "iface" for the interface
clock and "bus" for the bus clock per the requirements of the compatible.
- qcom,dload-mode: phandle to the TCSR hardware block and offset of the
diff --git a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
new file mode 100644
index 000000000000..25ed0266f6dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
@@ -0,0 +1,223 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/ddr/jedec,lpddr2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LPDDR2 SDRAM compliant to JEDEC JESD209-2
+
+maintainers:
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - elpida,ECB240ABACN
+ - elpida,B8132B2PB-6D-F
+ - enum:
+ - jedec,lpddr2-s4
+ - items:
+ - enum:
+ - jedec,lpddr2-s2
+ - items:
+ - enum:
+ - jedec,lpddr2-nvm
+
+ revision-id1:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 255
+ description: |
+ Revision 1 value of SDRAM chip. Obtained from device datasheet.
+
+ revision-id2:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 255
+ description: |
+ Revision 2 value of SDRAM chip. Obtained from device datasheet.
+
+ density:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Density in megabits of SDRAM chip. Obtained from device datasheet.
+ enum:
+ - 64
+ - 128
+ - 256
+ - 512
+ - 1024
+ - 2048
+ - 4096
+ - 8192
+ - 16384
+ - 32768
+
+ io-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ IO bus width in bits of SDRAM chip. Obtained from device datasheet.
+ enum:
+ - 32
+ - 16
+ - 8
+
+ tRRD-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ Active bank a to active bank b in terms of number of clock cycles.
+ Obtained from device datasheet.
+
+ tWTR-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ Internal WRITE-to-READ command delay in terms of number of clock cycles.
+ Obtained from device datasheet.
+
+ tXP-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ Exit power-down to next valid command delay in terms of number of clock
+ cycles. Obtained from device datasheet.
+
+ tRTP-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ Internal READ to PRECHARGE command delay in terms of number of clock
+ cycles. Obtained from device datasheet.
+
+ tCKE-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ CKE minimum pulse width (HIGH and LOW pulse width) in terms of number
+ of clock cycles. Obtained from device datasheet.
+
+ tRPab-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ Row precharge time (all banks) in terms of number of clock cycles.
+ Obtained from device datasheet.
+
+ tRCD-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ RAS-to-CAS delay in terms of number of clock cycles. Obtained from
+ device datasheet.
+
+ tWR-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ WRITE recovery time in terms of number of clock cycles. Obtained from
+ device datasheet.
+
+ tRASmin-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ Row active time in terms of number of clock cycles. Obtained from device
+ datasheet.
+
+ tCKESR-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ CKE minimum pulse width during SELF REFRESH (low pulse width during
+ SELF REFRESH) in terms of number of clock cycles. Obtained from device
+ datasheet.
+
+ tFAW-min-tck:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 16
+ description: |
+ Four-bank activate window in terms of number of clock cycles. Obtained
+ from device datasheet.
+
+patternProperties:
+ "^lpddr2-timings":
+ type: object
+ description: |
+ The lpddr2 node may have one or more child nodes of type "lpddr2-timings".
+ "lpddr2-timings" provides AC timing parameters of the device for
+ a given speed-bin. The user may provide the timings for as many
+ speed-bins as is required. Please see Documentation/devicetree/
+ bindings/memory-controllers/ddr/lpddr2-timings.txt for more information
+ on "lpddr2-timings".
+
+required:
+ - compatible
+ - density
+ - io-width
+
+additionalProperties: false
+
+examples:
+ - |
+ elpida_ECB240ABACN: lpddr2 {
+ compatible = "elpida,ECB240ABACN", "jedec,lpddr2-s4";
+ density = <2048>;
+ io-width = <32>;
+ revision-id1 = <1>;
+ revision-id2 = <0>;
+
+ tRPab-min-tck = <3>;
+ tRCD-min-tck = <3>;
+ tWR-min-tck = <3>;
+ tRASmin-min-tck = <3>;
+ tRRD-min-tck = <2>;
+ tWTR-min-tck = <2>;
+ tXP-min-tck = <2>;
+ tRTP-min-tck = <2>;
+ tCKE-min-tck = <3>;
+ tCKESR-min-tck = <3>;
+ tFAW-min-tck = <8>;
+
+ timings_elpida_ECB240ABACN_400mhz: lpddr2-timings0 {
+ compatible = "jedec,lpddr2-timings";
+ min-freq = <10000000>;
+ max-freq = <400000000>;
+ tRPab = <21000>;
+ tRCD = <18000>;
+ tWR = <15000>;
+ tRAS-min = <42000>;
+ tRRD = <10000>;
+ tWTR = <7500>;
+ tXP = <7500>;
+ tRTP = <7500>;
+ tCKESR = <15000>;
+ tDQSCK-max = <5500>;
+ tFAW = <50000>;
+ tZQCS = <90000>;
+ tZQCL = <360000>;
+ tZQinit = <1000000>;
+ tRAS-max-ns = <70000>;
+ };
+
+ timings_elpida_ECB240ABACN_200mhz: lpddr2-timings1 {
+ compatible = "jedec,lpddr2-timings";
+ min-freq = <10000000>;
+ max-freq = <200000000>;
+ tRPab = <21000>;
+ tRCD = <18000>;
+ tWR = <15000>;
+ tRAS-min = <42000>;
+ tRRD = <10000>;
+ tWTR = <10000>;
+ tXP = <7500>;
+ tRTP = <7500>;
+ tCKESR = <15000>;
+ tDQSCK-max = <5500>;
+ tFAW = <50000>;
+ tZQCS = <90000>;
+ tZQCL = <360000>;
+ tZQinit = <1000000>;
+ tRAS-max-ns = <70000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/ddr/lpddr2-timings.txt b/Documentation/devicetree/bindings/memory-controllers/ddr/lpddr2-timings.txt
index 9ceb19e0c7fd..9ceb19e0c7fd 100644
--- a/Documentation/devicetree/bindings/ddr/lpddr2-timings.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/lpddr2-timings.txt
diff --git a/Documentation/devicetree/bindings/ddr/lpddr3-timings.txt b/Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3-timings.txt
index 84705e50a3fd..84705e50a3fd 100644
--- a/Documentation/devicetree/bindings/ddr/lpddr3-timings.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3-timings.txt
diff --git a/Documentation/devicetree/bindings/ddr/lpddr3.txt b/Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3.txt
index b221e653d384..031af5fb0379 100644
--- a/Documentation/devicetree/bindings/ddr/lpddr3.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3.txt
@@ -43,8 +43,9 @@ These values shall be obtained from the device data-sheet.
Child nodes:
- The lpddr3 node may have one or more child nodes of type "lpddr3-timings".
"lpddr3-timings" provides AC timing parameters of the device for
- a given speed-bin. Please see Documentation/devicetree/
- bindings/ddr/lpddr3-timings.txt for more information on "lpddr3-timings"
+ a given speed-bin. Please see
+ Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3-timings.txt
+ for more information on "lpddr3-timings"
Example:
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
index e87e4382807c..3a82b0b27fa0 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
@@ -16,7 +16,7 @@ description: |
MediaTek SMI have two generations of HW architecture, here is the list
which generation the SoCs use:
generation 1: mt2701 and mt7623.
- generation 2: mt2712, mt6779, mt8167, mt8173, mt8183 and mt8192.
+ generation 2: mt2712, mt6779, mt8167, mt8173, mt8183, mt8192 and mt8195.
There's slight differences between the two SMI, for generation 2, the
register which control the iommu port is at each larb's register base. But
@@ -36,6 +36,9 @@ properties:
- mediatek,mt8173-smi-common
- mediatek,mt8183-smi-common
- mediatek,mt8192-smi-common
+ - mediatek,mt8195-smi-common-vdo
+ - mediatek,mt8195-smi-common-vpp
+ - mediatek,mt8195-smi-sub-common
- description: for mt7623
items:
@@ -65,6 +68,10 @@ properties:
minItems: 2
maxItems: 4
+ mediatek,smi:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: a phandle to the smi-common node above. Only for sub-common.
+
required:
- compatible
- reg
@@ -91,6 +98,29 @@ allOf:
- const: smi
- const: async
+ - if: # only for sub common
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt8195-smi-sub-common
+ then:
+ required:
+ - mediatek,smi
+ properties:
+ clock:
+ items:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: apb
+ - const: smi
+ - const: gals0
+ else:
+ properties:
+ mediatek,smi: false
+
- if: # for gen2 HW that have gals
properties:
compatible:
@@ -98,6 +128,8 @@ allOf:
- mediatek,mt6779-smi-common
- mediatek,mt8183-smi-common
- mediatek,mt8192-smi-common
+ - mediatek,mt8195-smi-common-vdo
+ - mediatek,mt8195-smi-common-vpp
then:
properties:
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
index 2353f6cf3c80..eaeff1ada7f8 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
@@ -24,6 +24,7 @@ properties:
- mediatek,mt8173-smi-larb
- mediatek,mt8183-smi-larb
- mediatek,mt8192-smi-larb
+ - mediatek,mt8195-smi-larb
- description: for mt7623
items:
@@ -74,6 +75,7 @@ allOf:
compatible:
enum:
- mediatek,mt8183-smi-larb
+ - mediatek,mt8195-smi-larb
then:
properties:
@@ -108,6 +110,7 @@ allOf:
- mediatek,mt6779-smi-larb
- mediatek,mt8167-smi-larb
- mediatek,mt8192-smi-larb
+ - mediatek,mt8195-smi-larb
then:
required:
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml
index cac6842dc8f1..2fa44951cfde 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml
@@ -164,12 +164,20 @@ patternProperties:
"#size-cells":
const: 0
+ lpddr2:
+ $ref: "ddr/jedec,lpddr2.yaml#"
+ type: object
+
patternProperties:
"^emc-table@[0-9]+$":
$ref: "#/$defs/emc-table"
- required:
- - nvidia,ram-code
+ oneOf:
+ - required:
+ - nvidia,ram-code
+
+ - required:
+ - lpddr2
additionalProperties: false
@@ -227,4 +235,15 @@ examples:
0x00000000 0x00000000 0x00000000 0x00000000>;
};
};
+
+ emc-tables@1 {
+ reg = <1>;
+
+ lpddr2 {
+ compatible = "elpida,B8132B2PB-6D-F", "jedec,lpddr2-s4";
+ revision-id1 = <1>;
+ density = <2048>;
+ io-width = <16>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
index d25072c414e4..9da80e8f2444 100644
--- a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
@@ -33,6 +33,7 @@ properties:
- renesas,r8a77970-rpc-if # R-Car V3M
- renesas,r8a77980-rpc-if # R-Car V3H
- renesas,r8a77995-rpc-if # R-Car D3
+ - renesas,r8a779a0-rpc-if # R-Car V3U
- const: renesas,rcar-gen3-rpc-if # a generic R-Car gen3 or RZ/G2 device
reg:
diff --git a/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml b/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
index 6f4fd5814bf4..fe8639dcffab 100644
--- a/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
@@ -51,7 +51,8 @@ properties:
$ref: '/schemas/types.yaml#/definitions/phandle'
description: |
phandle of the connected DRAM memory device. For more information please
- refer to documentation file: Documentation/devicetree/bindings/ddr/lpddr3.txt
+ refer to documentation file:
+ Documentation/devicetree/bindings/memory-controllers/ddr/lpddr3.txt
operating-points-v2: true
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index 239f37881cae..e810480e3eb7 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,mdm9607-rpmpd
- qcom,msm8916-rpmpd
- qcom,msm8939-rpmpd
+ - qcom,msm8953-rpmpd
- qcom,msm8976-rpmpd
- qcom,msm8994-rpmpd
- qcom,msm8996-rpmpd
@@ -31,6 +32,7 @@ properties:
- qcom,sdm845-rpmhpd
- qcom,sdx55-rpmhpd
- qcom,sm6115-rpmpd
+ - qcom,sm6350-rpmhpd
- qcom,sm8150-rpmhpd
- qcom,sm8250-rpmhpd
- qcom,sm8350-rpmhpd
diff --git a/Documentation/devicetree/bindings/reset/microchip,rst.yaml b/Documentation/devicetree/bindings/reset/microchip,rst.yaml
index 370579aeeca1..578bfa529b16 100644
--- a/Documentation/devicetree/bindings/reset/microchip,rst.yaml
+++ b/Documentation/devicetree/bindings/reset/microchip,rst.yaml
@@ -20,7 +20,9 @@ properties:
pattern: "^reset-controller@[0-9a-f]+$"
compatible:
- const: microchip,sparx5-switch-reset
+ enum:
+ - microchip,sparx5-switch-reset
+ - microchip,lan966x-switch-reset
reg:
items:
diff --git a/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml b/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml
index 29e4a900cad7..bfbd3e9b4186 100644
--- a/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml
@@ -23,6 +23,7 @@ properties:
- socionext,uniphier-pxs2-usb3-reset
- socionext,uniphier-ld20-usb3-reset
- socionext,uniphier-pxs3-usb3-reset
+ - socionext,uniphier-nx1-usb3-reset
- socionext,uniphier-pro4-ahci-reset
- socionext,uniphier-pxs2-ahci-reset
- socionext,uniphier-pxs3-ahci-reset
diff --git a/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml b/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
index 4c9b0ebf6869..377a7d242323 100644
--- a/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
@@ -23,6 +23,7 @@ properties:
- socionext,uniphier-ld11-reset
- socionext,uniphier-ld20-reset
- socionext,uniphier-pxs3-reset
+ - socionext,uniphier-nx1-reset
- description: Media I/O (MIO) reset, SD reset
enum:
- socionext,uniphier-ld4-mio-reset
@@ -34,6 +35,7 @@ properties:
- socionext,uniphier-ld11-sd-reset
- socionext,uniphier-ld20-sd-reset
- socionext,uniphier-pxs3-sd-reset
+ - socionext,uniphier-nx1-sd-reset
- description: Peripheral reset
enum:
- socionext,uniphier-ld4-peri-reset
@@ -44,6 +46,7 @@ properties:
- socionext,uniphier-ld11-peri-reset
- socionext,uniphier-ld20-peri-reset
- socionext,uniphier-pxs3-peri-reset
+ - socionext,uniphier-nx1-peri-reset
- description: Analog signal amplifier reset
enum:
- socionext,uniphier-ld11-adamv-reset
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
index 93e4b737ee1b..e2e173dfada7 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
@@ -19,8 +19,7 @@ description:
The AOSS side channel exposes control over a set of resources, used to control
a set of debug related clocks and to affect the low power state of resources
- related to the secondary subsystems. These resources are exposed as a set of
- power-domains.
+ related to the secondary subsystems.
properties:
compatible:
@@ -30,6 +29,7 @@ properties:
- qcom,sc7280-aoss-qmp
- qcom,sc8180x-aoss-qmp
- qcom,sdm845-aoss-qmp
+ - qcom,sm6350-aoss-qmp
- qcom,sm8150-aoss-qmp
- qcom,sm8250-aoss-qmp
- qcom,sm8350-aoss-qmp
@@ -57,13 +57,6 @@ properties:
description:
The single clock represents the QDSS clock.
- "#power-domain-cells":
- const: 1
- description: |
- The provided power-domains are:
- CDSP state (0), LPASS state (1), modem state (2), SLPI
- state (3), SPSS state (4) and Venus state (5).
-
required:
- compatible
- reg
@@ -101,7 +94,6 @@ examples:
mboxes = <&apss_shared 0>;
#clock-cells = <0>;
- #power-domain-cells = <1>;
cx_cdev: cx {
#cooling-cells = <2>;
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
index cc3fe5ed7421..b32457c2fc0b 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -34,6 +34,7 @@ properties:
- qcom,rpm-ipq6018
- qcom,rpm-msm8226
- qcom,rpm-msm8916
+ - qcom,rpm-msm8953
- qcom,rpm-msm8974
- qcom,rpm-msm8976
- qcom,rpm-msm8996
@@ -41,6 +42,7 @@ properties:
- qcom,rpm-sdm660
- qcom,rpm-sm6115
- qcom,rpm-sm6125
+ - qcom,rpm-qcm2290
- qcom,rpm-qcs404
qcom,smd-channels:
@@ -57,6 +59,7 @@ if:
- qcom,rpm-apq8084
- qcom,rpm-msm8916
- qcom,rpm-msm8974
+ - qcom,rpm-msm8953
then:
required:
- qcom,smd-channels
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml
index f7e17713b3d8..4149cf2b66be 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml
@@ -10,14 +10,18 @@ maintainers:
- Andy Gross <agross@kernel.org>
- Bjorn Andersson <bjorn.andersson@linaro.org>
-description: |
- This binding describes the Qualcomm Shared Memory Manager, used to share data
- between various subsystems and OSes in Qualcomm platforms.
+description:
+ This binding describes the Qualcomm Shared Memory Manager, a region of
+ reserved-memory used to share data between various subsystems and OSes in
+ Qualcomm platforms.
properties:
compatible:
const: qcom,smem
+ reg:
+ maxItems: 1
+
memory-region:
maxItems: 1
description: handle to memory reservation for main SMEM memory region.
@@ -29,11 +33,19 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
description: handle to RPM message memory resource
+ no-map: true
+
required:
- compatible
- - memory-region
- hwlocks
+oneOf:
+ - required:
+ - reg
+ - no-map
+ - required:
+ - memory-region
+
additionalProperties: false
examples:
@@ -43,6 +55,20 @@ examples:
#size-cells = <1>;
ranges;
+ smem@fa00000 {
+ compatible = "qcom,smem";
+ reg = <0xfa00000 0x200000>;
+ no-map;
+
+ hwlocks = <&tcsr_mutex 3>;
+ };
+ };
+ - |
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
smem_region: smem@fa00000 {
reg = <0xfa00000 0x200000>;
no-map;
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml
new file mode 100644
index 000000000000..07d2d5398345
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,spm.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/qcom/qcom,spm.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Subsystem Power Manager binding
+
+maintainers:
+ - Andy Gross <agross@kernel.org>
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ This binding describes the Qualcomm Subsystem Power Manager, used to control
+ the peripheral logic surrounding the application cores in Qualcomm platforms.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,sdm660-gold-saw2-v4.1-l2
+ - qcom,sdm660-silver-saw2-v4.1-l2
+ - qcom,msm8998-gold-saw2-v4.1-l2
+ - qcom,msm8998-silver-saw2-v4.1-l2
+ - qcom,msm8916-saw2-v3.0-cpu
+ - qcom,msm8226-saw2-v2.1-cpu
+ - qcom,msm8974-saw2-v2.1-cpu
+ - qcom,apq8084-saw2-v2.1-cpu
+ - qcom,apq8064-saw2-v1.1-cpu
+ - const: qcom,saw2
+
+ reg:
+ description: Base address and size of the SPM register region
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+
+ /* Example 1: SoC using SAW2 and kpss-acc-v2 CPUIdle */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "qcom,kryo";
+ device_type = "cpu";
+ enable-method = "qcom,kpss-acc-v2";
+ qcom,saw = <&saw0>;
+ reg = <0x0>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+ };
+
+ saw0: power-manager@f9089000 {
+ compatible = "qcom,msm8974-saw2-v2.1-cpu", "qcom,saw2";
+ reg = <0xf9089000 0x1000>;
+ };
+
+ - |
+
+ /*
+ * Example 2: New-gen multi cluster SoC using SAW only for L2;
+ * This does not require any cpuidle driver, nor any cpu phandle.
+ */
+ power-manager@17812000 {
+ compatible = "qcom,msm8998-gold-saw2-v4.1-l2", "qcom,saw2";
+ reg = <0x17812000 0x1000>;
+ };
+
+ power-manager@17912000 {
+ compatible = "qcom,msm8998-silver-saw2-v4.1-l2", "qcom,saw2";
+ reg = <0x17912000 0x1000>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml
new file mode 100644
index 000000000000..99dff7d73b7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/qcom/qcom-stats.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. (QTI) Stats bindings
+
+maintainers:
+ - Maulik Shah <mkshah@codeaurora.org>
+
+description:
+ Always On Processor/Resource Power Manager maintains statistics of the SoC
+ sleep modes involving powering down of the rails and oscillator clock.
+
+ Statistics includes SoC sleep mode type, number of times low power mode were
+ entered, time of last entry, time of last exit and accumulated sleep duration.
+
+properties:
+ compatible:
+ enum:
+ - qcom,rpmh-stats
+ - qcom,rpm-stats
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Example of rpmh sleep stats
+ - |
+ sram@c3f0000 {
+ compatible = "qcom,rpmh-stats";
+ reg = <0x0c3f0000 0x400>;
+ };
+ # Example of rpm sleep stats
+ - |
+ sram@4690000 {
+ compatible = "qcom,rpm-stats";
+ reg = <0x04690000 0x10000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml
index 939cf2418445..d4e418b6a1c1 100644
--- a/Documentation/devicetree/bindings/sram/sram.yaml
+++ b/Documentation/devicetree/bindings/sram/sram.yaml
@@ -31,6 +31,7 @@ properties:
- amlogic,meson-gxbb-sram
- arm,juno-sram-ns
- atmel,sama5d2-securam
+ - qcom,rpm-msg-ram
- rockchip,rk3288-pmu-sram
reg:
@@ -135,7 +136,9 @@ if:
properties:
compatible:
contains:
- const: rockchip,rk3288-pmu-sram
+ enum:
+ - qcom,rpm-msg-ram
+ - rockchip,rk3288-pmu-sram
else:
required:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 518487ec65e5..3c28347f873e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -359,6 +359,8 @@ patternProperties:
description: Shenzhen Elida Technology Co., Ltd.
"^elimo,.*":
description: Elimo Engineering Ltd.
+ "^elpida,.*":
+ description: Elpida Memory, Inc.
"^embest,.*":
description: Shenzhen Embest Technology Co., Ltd.
"^emlid,.*":
diff --git a/MAINTAINERS b/MAINTAINERS
index 83321332f63e..8f7288596157 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11992,6 +11992,14 @@ M: Sean Wang <sean.wang@mediatek.com>
S: Maintained
F: drivers/char/hw_random/mtk-rng.c
+MEDIATEK SMI DRIVER
+M: Yong Wu <yong.wu@mediatek.com>
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: Documentation/devicetree/bindings/memory-controllers/mediatek,smi*
+F: drivers/memory/mtk-smi.c
+F: include/soc/mediatek/smi.h
+
MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang@mediatek.com>
M: Landen Chao <Landen.Chao@mediatek.com>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d96209b20736..f0f9e8bec83a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -351,7 +351,7 @@ config ARCH_EP93XX
select CLKSRC_MMIO
select CPU_ARM920T
select GPIOLIB
- select HAVE_LEGACY_CLK
+ select COMMON_CLK
help
This enables support for the Cirrus EP93xx series of CPUs.
@@ -480,7 +480,6 @@ config ARCH_S3C24XX
select GPIOLIB
select GENERIC_IRQ_MULTI_HANDLER
select HAVE_S3C2410_I2C if I2C
- select HAVE_S3C_RTC if RTC_CLASS
select NEED_MACH_IO_H
select S3C2410_WATCHDOG
select SAMSUNG_ATAGS
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 2810eb5b2aca..cc75087134d3 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/soc/cirrus/ep93xx.h>
#include "hardware.h"
@@ -24,348 +25,194 @@
#include "soc.h"
-struct clk {
- struct clk *parent;
- unsigned long rate;
- int users;
- int sw_locked;
- void __iomem *enable_reg;
- u32 enable_mask;
-
- unsigned long (*get_rate)(struct clk *clk);
- int (*set_rate)(struct clk *clk, unsigned long rate);
-};
-
-
-static unsigned long get_uart_rate(struct clk *clk);
-
-static int set_keytchclk_rate(struct clk *clk, unsigned long rate);
-static int set_div_rate(struct clk *clk, unsigned long rate);
-static int set_i2s_sclk_rate(struct clk *clk, unsigned long rate);
-static int set_i2s_lrclk_rate(struct clk *clk, unsigned long rate);
+static DEFINE_SPINLOCK(clk_lock);
-static struct clk clk_xtali = {
- .rate = EP93XX_EXT_CLK_RATE,
-};
-static struct clk clk_uart1 = {
- .parent = &clk_xtali,
- .sw_locked = 1,
- .enable_reg = EP93XX_SYSCON_DEVCFG,
- .enable_mask = EP93XX_SYSCON_DEVCFG_U1EN,
- .get_rate = get_uart_rate,
-};
-static struct clk clk_uart2 = {
- .parent = &clk_xtali,
- .sw_locked = 1,
- .enable_reg = EP93XX_SYSCON_DEVCFG,
- .enable_mask = EP93XX_SYSCON_DEVCFG_U2EN,
- .get_rate = get_uart_rate,
-};
-static struct clk clk_uart3 = {
- .parent = &clk_xtali,
- .sw_locked = 1,
- .enable_reg = EP93XX_SYSCON_DEVCFG,
- .enable_mask = EP93XX_SYSCON_DEVCFG_U3EN,
- .get_rate = get_uart_rate,
-};
-static struct clk clk_pll1 = {
- .parent = &clk_xtali,
-};
-static struct clk clk_f = {
- .parent = &clk_pll1,
-};
-static struct clk clk_h = {
- .parent = &clk_pll1,
-};
-static struct clk clk_p = {
- .parent = &clk_pll1,
-};
-static struct clk clk_pll2 = {
- .parent = &clk_xtali,
-};
-static struct clk clk_usb_host = {
- .parent = &clk_pll2,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_USH_EN,
-};
-static struct clk clk_keypad = {
- .parent = &clk_xtali,
- .sw_locked = 1,
- .enable_reg = EP93XX_SYSCON_KEYTCHCLKDIV,
- .enable_mask = EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
- .set_rate = set_keytchclk_rate,
-};
-static struct clk clk_adc = {
- .parent = &clk_xtali,
- .sw_locked = 1,
- .enable_reg = EP93XX_SYSCON_KEYTCHCLKDIV,
- .enable_mask = EP93XX_SYSCON_KEYTCHCLKDIV_TSEN,
- .set_rate = set_keytchclk_rate,
-};
-static struct clk clk_spi = {
- .parent = &clk_xtali,
- .rate = EP93XX_EXT_CLK_RATE,
-};
-static struct clk clk_pwm = {
- .parent = &clk_xtali,
- .rate = EP93XX_EXT_CLK_RATE,
-};
+static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+static char pclk_divisors[] = { 1, 2, 4, 8 };
-static struct clk clk_video = {
- .sw_locked = 1,
- .enable_reg = EP93XX_SYSCON_VIDCLKDIV,
- .enable_mask = EP93XX_SYSCON_CLKDIV_ENABLE,
- .set_rate = set_div_rate,
-};
+static char adc_divisors[] = { 16, 4 };
+static char sclk_divisors[] = { 2, 4 };
+static char lrclk_divisors[] = { 32, 64, 128 };
-static struct clk clk_i2s_mclk = {
- .sw_locked = 1,
- .enable_reg = EP93XX_SYSCON_I2SCLKDIV,
- .enable_mask = EP93XX_SYSCON_CLKDIV_ENABLE,
- .set_rate = set_div_rate,
+static const char * const mux_parents[] = {
+ "xtali",
+ "pll1",
+ "pll2"
};
-static struct clk clk_i2s_sclk = {
- .sw_locked = 1,
- .parent = &clk_i2s_mclk,
- .enable_reg = EP93XX_SYSCON_I2SCLKDIV,
- .enable_mask = EP93XX_SYSCON_I2SCLKDIV_SENA,
- .set_rate = set_i2s_sclk_rate,
-};
+/*
+ * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS
+ */
+static unsigned long calc_pll_rate(unsigned long long rate, u32 config_word)
+{
+ int i;
-static struct clk clk_i2s_lrclk = {
- .sw_locked = 1,
- .parent = &clk_i2s_sclk,
- .enable_reg = EP93XX_SYSCON_I2SCLKDIV,
- .enable_mask = EP93XX_SYSCON_I2SCLKDIV_SENA,
- .set_rate = set_i2s_lrclk_rate,
-};
+ rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */
+ rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */
+ do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */
+ for (i = 0; i < ((config_word >> 16) & 3); i++) /* PS */
+ rate >>= 1;
-/* DMA Clocks */
-static struct clk clk_m2p0 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P0,
-};
-static struct clk clk_m2p1 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P1,
-};
-static struct clk clk_m2p2 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P2,
-};
-static struct clk clk_m2p3 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P3,
-};
-static struct clk clk_m2p4 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P4,
-};
-static struct clk clk_m2p5 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P5,
-};
-static struct clk clk_m2p6 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P6,
-};
-static struct clk clk_m2p7 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P7,
-};
-static struct clk clk_m2p8 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P8,
-};
-static struct clk clk_m2p9 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P9,
-};
-static struct clk clk_m2m0 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2M0,
-};
-static struct clk clk_m2m1 = {
- .parent = &clk_h,
- .enable_reg = EP93XX_SYSCON_PWRCNT,
- .enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2M1,
-};
+ return (unsigned long)rate;
+}
-#define INIT_CK(dev,con,ck) \
- { .dev_id = dev, .con_id = con, .clk = ck }
-
-static struct clk_lookup clocks[] = {
- INIT_CK(NULL, "xtali", &clk_xtali),
- INIT_CK("apb:uart1", NULL, &clk_uart1),
- INIT_CK("apb:uart2", NULL, &clk_uart2),
- INIT_CK("apb:uart3", NULL, &clk_uart3),
- INIT_CK(NULL, "pll1", &clk_pll1),
- INIT_CK(NULL, "fclk", &clk_f),
- INIT_CK(NULL, "hclk", &clk_h),
- INIT_CK(NULL, "apb_pclk", &clk_p),
- INIT_CK(NULL, "pll2", &clk_pll2),
- INIT_CK("ohci-platform", NULL, &clk_usb_host),
- INIT_CK("ep93xx-keypad", NULL, &clk_keypad),
- INIT_CK("ep93xx-adc", NULL, &clk_adc),
- INIT_CK("ep93xx-fb", NULL, &clk_video),
- INIT_CK("ep93xx-spi.0", NULL, &clk_spi),
- INIT_CK("ep93xx-i2s", "mclk", &clk_i2s_mclk),
- INIT_CK("ep93xx-i2s", "sclk", &clk_i2s_sclk),
- INIT_CK("ep93xx-i2s", "lrclk", &clk_i2s_lrclk),
- INIT_CK(NULL, "pwm_clk", &clk_pwm),
- INIT_CK(NULL, "m2p0", &clk_m2p0),
- INIT_CK(NULL, "m2p1", &clk_m2p1),
- INIT_CK(NULL, "m2p2", &clk_m2p2),
- INIT_CK(NULL, "m2p3", &clk_m2p3),
- INIT_CK(NULL, "m2p4", &clk_m2p4),
- INIT_CK(NULL, "m2p5", &clk_m2p5),
- INIT_CK(NULL, "m2p6", &clk_m2p6),
- INIT_CK(NULL, "m2p7", &clk_m2p7),
- INIT_CK(NULL, "m2p8", &clk_m2p8),
- INIT_CK(NULL, "m2p9", &clk_m2p9),
- INIT_CK(NULL, "m2m0", &clk_m2m0),
- INIT_CK(NULL, "m2m1", &clk_m2m1),
+struct clk_psc {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 bit_idx;
+ u32 mask;
+ u8 shift;
+ u8 width;
+ char *div;
+ u8 num_div;
+ spinlock_t *lock;
};
-static DEFINE_SPINLOCK(clk_lock);
+#define to_clk_psc(_hw) container_of(_hw, struct clk_psc, hw)
-static void __clk_enable(struct clk *clk)
+static int ep93xx_clk_is_enabled(struct clk_hw *hw)
{
- if (!clk->users++) {
- if (clk->parent)
- __clk_enable(clk->parent);
-
- if (clk->enable_reg) {
- u32 v;
-
- v = __raw_readl(clk->enable_reg);
- v |= clk->enable_mask;
- if (clk->sw_locked)
- ep93xx_syscon_swlocked_write(v, clk->enable_reg);
- else
- __raw_writel(v, clk->enable_reg);
- }
- }
+ struct clk_psc *psc = to_clk_psc(hw);
+ u32 val = readl(psc->reg);
+
+ return (val & BIT(psc->bit_idx)) ? 1 : 0;
}
-int clk_enable(struct clk *clk)
+static int ep93xx_clk_enable(struct clk_hw *hw)
{
- unsigned long flags;
+ struct clk_psc *psc = to_clk_psc(hw);
+ unsigned long flags = 0;
+ u32 val;
- if (!clk)
- return -EINVAL;
+ if (psc->lock)
+ spin_lock_irqsave(psc->lock, flags);
+
+ val = __raw_readl(psc->reg);
+ val |= BIT(psc->bit_idx);
+
+ ep93xx_syscon_swlocked_write(val, psc->reg);
- spin_lock_irqsave(&clk_lock, flags);
- __clk_enable(clk);
- spin_unlock_irqrestore(&clk_lock, flags);
+ if (psc->lock)
+ spin_unlock_irqrestore(psc->lock, flags);
return 0;
}
-EXPORT_SYMBOL(clk_enable);
-static void __clk_disable(struct clk *clk)
+static void ep93xx_clk_disable(struct clk_hw *hw)
{
- if (!--clk->users) {
- if (clk->enable_reg) {
- u32 v;
-
- v = __raw_readl(clk->enable_reg);
- v &= ~clk->enable_mask;
- if (clk->sw_locked)
- ep93xx_syscon_swlocked_write(v, clk->enable_reg);
- else
- __raw_writel(v, clk->enable_reg);
- }
+ struct clk_psc *psc = to_clk_psc(hw);
+ unsigned long flags = 0;
+ u32 val;
- if (clk->parent)
- __clk_disable(clk->parent);
- }
-}
+ if (psc->lock)
+ spin_lock_irqsave(psc->lock, flags);
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
+ val = __raw_readl(psc->reg);
+ val &= ~BIT(psc->bit_idx);
- if (!clk)
- return;
+ ep93xx_syscon_swlocked_write(val, psc->reg);
- spin_lock_irqsave(&clk_lock, flags);
- __clk_disable(clk);
- spin_unlock_irqrestore(&clk_lock, flags);
+ if (psc->lock)
+ spin_unlock_irqrestore(psc->lock, flags);
}
-EXPORT_SYMBOL(clk_disable);
-static unsigned long get_uart_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- u32 value;
+static const struct clk_ops clk_ep93xx_gate_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+};
- value = __raw_readl(EP93XX_SYSCON_PWRCNT);
- if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
- return rate;
- else
- return rate / 2;
+static struct clk_hw *ep93xx_clk_register_gate(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ u8 bit_idx)
+{
+ struct clk_init_data init;
+ struct clk_psc *psc;
+ struct clk *clk;
+
+ psc = kzalloc(sizeof(*psc), GFP_KERNEL);
+ if (!psc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_ep93xx_gate_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ psc->reg = reg;
+ psc->bit_idx = bit_idx;
+ psc->hw.init = &init;
+ psc->lock = &clk_lock;
+
+ clk = clk_register(NULL, &psc->hw);
+ if (IS_ERR(clk))
+ kfree(psc);
+
+ return &psc->hw;
}
-unsigned long clk_get_rate(struct clk *clk)
+static u8 ep93xx_mux_get_parent(struct clk_hw *hw)
{
- if (clk->get_rate)
- return clk->get_rate(clk);
+ struct clk_psc *psc = to_clk_psc(hw);
+ u32 val = __raw_readl(psc->reg);
- return clk->rate;
+ if (!(val & EP93XX_SYSCON_CLKDIV_ESEL))
+ return 0;
+
+ if (!(val & EP93XX_SYSCON_CLKDIV_PSEL))
+ return 1;
+
+ return 2;
}
-EXPORT_SYMBOL(clk_get_rate);
-static int set_keytchclk_rate(struct clk *clk, unsigned long rate)
+static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index)
{
+ struct clk_psc *psc = to_clk_psc(hw);
+ unsigned long flags = 0;
u32 val;
- u32 div_bit;
- val = __raw_readl(clk->enable_reg);
+ if (index >= ARRAY_SIZE(mux_parents))
+ return -EINVAL;
- /*
- * The Key Matrix and ADC clocks are configured using the same
- * System Controller register. The clock used will be either
- * 1/4 or 1/16 the external clock rate depending on the
- * EP93XX_SYSCON_KEYTCHCLKDIV_KDIV/EP93XX_SYSCON_KEYTCHCLKDIV_ADIV
- * bit being set or cleared.
- */
- div_bit = clk->enable_mask >> 15;
+ if (psc->lock)
+ spin_lock_irqsave(psc->lock, flags);
- if (rate == EP93XX_KEYTCHCLK_DIV4)
- val |= div_bit;
- else if (rate == EP93XX_KEYTCHCLK_DIV16)
- val &= ~div_bit;
- else
- return -EINVAL;
+ val = __raw_readl(psc->reg);
+ val &= ~(EP93XX_SYSCON_CLKDIV_ESEL | EP93XX_SYSCON_CLKDIV_PSEL);
+
+
+ if (index != 0) {
+ val |= EP93XX_SYSCON_CLKDIV_ESEL;
+ val |= (index - 1) ? EP93XX_SYSCON_CLKDIV_PSEL : 0;
+ }
+
+ ep93xx_syscon_swlocked_write(val, psc->reg);
+
+ if (psc->lock)
+ spin_unlock_irqrestore(psc->lock, flags);
- ep93xx_syscon_swlocked_write(val, clk->enable_reg);
- clk->rate = rate;
return 0;
}
-static int calc_clk_div(struct clk *clk, unsigned long rate,
- int *psel, int *esel, int *pdiv, int *div)
+static bool is_best(unsigned long rate, unsigned long now,
+ unsigned long best)
{
- struct clk *mclk;
- unsigned long max_rate, actual_rate, mclk_rate, rate_err = -1;
- int i, found = 0, __div = 0, __pdiv = 0;
+ return abs(rate - now) < abs(rate - best);
+}
- /* Don't exceed the maximum rate */
- max_rate = max3(clk_pll1.rate / 4, clk_pll2.rate / 4, clk_xtali.rate / 4);
- rate = min(rate, max_rate);
+static int ep93xx_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long rate = req->rate;
+ struct clk *best_parent = 0;
+ unsigned long __parent_rate;
+ unsigned long best_rate = 0, actual_rate, mclk_rate;
+ unsigned long best_parent_rate;
+ int __div = 0, __pdiv = 0;
+ int i;
/*
* Try the two pll's and the external clock
@@ -376,14 +223,11 @@ static int calc_clk_div(struct clk *clk, unsigned long rate,
* http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf
*
*/
- for (i = 0; i < 3; i++) {
- if (i == 0)
- mclk = &clk_xtali;
- else if (i == 1)
- mclk = &clk_pll1;
- else
- mclk = &clk_pll2;
- mclk_rate = mclk->rate * 2;
+ for (i = 0; i < ARRAY_SIZE(mux_parents); i++) {
+ struct clk *parent = clk_get_sys(mux_parents[i], NULL);
+
+ __parent_rate = clk_get_rate(parent);
+ mclk_rate = __parent_rate * 2;
/* Try each predivider value */
for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
@@ -392,197 +236,494 @@ static int calc_clk_div(struct clk *clk, unsigned long rate,
continue;
actual_rate = mclk_rate / (__pdiv * __div);
-
- if (!found || abs(actual_rate - rate) < rate_err) {
- *pdiv = __pdiv - 3;
- *div = __div;
- *psel = (i == 2);
- *esel = (i != 0);
- clk->parent = mclk;
- clk->rate = actual_rate;
- rate_err = abs(actual_rate - rate);
- found = 1;
+ if (is_best(rate, actual_rate, best_rate)) {
+ best_rate = actual_rate;
+ best_parent_rate = __parent_rate;
+ best_parent = parent;
}
}
}
- if (!found)
+ if (!best_parent)
return -EINVAL;
+ req->best_parent_rate = best_parent_rate;
+ req->best_parent_hw = __clk_get_hw(best_parent);
+ req->rate = best_rate;
+
return 0;
}
-static int set_div_rate(struct clk *clk, unsigned long rate)
+static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- int err, psel = 0, esel = 0, pdiv = 0, div = 0;
- u32 val;
-
- err = calc_clk_div(clk, rate, &psel, &esel, &pdiv, &div);
- if (err)
- return err;
+ struct clk_psc *psc = to_clk_psc(hw);
+ unsigned long rate = 0;
+ u32 val = __raw_readl(psc->reg);
+ int __pdiv = ((val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & 0x03);
+ int __div = val & 0x7f;
- /* Clear the esel, psel, pdiv and div bits */
- val = __raw_readl(clk->enable_reg);
- val &= ~0x7fff;
+ if (__div > 0)
+ rate = (parent_rate * 2) / ((__pdiv + 3) * __div);
- /* Set the new esel, psel, pdiv and div bits for the new clock rate */
- val |= (esel ? EP93XX_SYSCON_CLKDIV_ESEL : 0) |
- (psel ? EP93XX_SYSCON_CLKDIV_PSEL : 0) |
- (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div;
- ep93xx_syscon_swlocked_write(val, clk->enable_reg);
- return 0;
+ return rate;
}
-static int set_i2s_sclk_rate(struct clk *clk, unsigned long rate)
+static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
- unsigned val = __raw_readl(clk->enable_reg);
-
- if (rate == clk_i2s_mclk.rate / 2)
- ep93xx_syscon_swlocked_write(val & ~EP93XX_I2SCLKDIV_SDIV,
- clk->enable_reg);
- else if (rate == clk_i2s_mclk.rate / 4)
- ep93xx_syscon_swlocked_write(val | EP93XX_I2SCLKDIV_SDIV,
- clk->enable_reg);
- else
+ struct clk_psc *psc = to_clk_psc(hw);
+ int pdiv = 0, div = 0;
+ unsigned long best_rate = 0, actual_rate, mclk_rate;
+ int __div = 0, __pdiv = 0;
+ u32 val;
+
+ mclk_rate = parent_rate * 2;
+
+ for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
+ __div = mclk_rate / (rate * __pdiv);
+ if (__div < 2 || __div > 127)
+ continue;
+
+ actual_rate = mclk_rate / (__pdiv * __div);
+ if (is_best(rate, actual_rate, best_rate)) {
+ pdiv = __pdiv - 3;
+ div = __div;
+ best_rate = actual_rate;
+ }
+ }
+
+ if (!best_rate)
return -EINVAL;
- clk_i2s_sclk.rate = rate;
+ val = __raw_readl(psc->reg);
+
+ /* Clear old dividers */
+ val &= ~0x37f;
+
+ /* Set the new pdiv and div bits for the new clock rate */
+ val |= (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div;
+ ep93xx_syscon_swlocked_write(val, psc->reg);
+
return 0;
}
-static int set_i2s_lrclk_rate(struct clk *clk, unsigned long rate)
-{
- unsigned val = __raw_readl(clk->enable_reg) &
- ~EP93XX_I2SCLKDIV_LRDIV_MASK;
-
- if (rate == clk_i2s_sclk.rate / 32)
- ep93xx_syscon_swlocked_write(val | EP93XX_I2SCLKDIV_LRDIV32,
- clk->enable_reg);
- else if (rate == clk_i2s_sclk.rate / 64)
- ep93xx_syscon_swlocked_write(val | EP93XX_I2SCLKDIV_LRDIV64,
- clk->enable_reg);
- else if (rate == clk_i2s_sclk.rate / 128)
- ep93xx_syscon_swlocked_write(val | EP93XX_I2SCLKDIV_LRDIV128,
- clk->enable_reg);
- else
- return -EINVAL;
+static const struct clk_ops clk_ddiv_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .get_parent = ep93xx_mux_get_parent,
+ .set_parent = ep93xx_mux_set_parent_lock,
+ .determine_rate = ep93xx_mux_determine_rate,
+ .recalc_rate = ep93xx_ddiv_recalc_rate,
+ .set_rate = ep93xx_ddiv_set_rate,
+};
- clk_i2s_lrclk.rate = rate;
- return 0;
+static struct clk_hw *clk_hw_register_ddiv(const char *name,
+ void __iomem *reg,
+ u8 bit_idx)
+{
+ struct clk_init_data init;
+ struct clk_psc *psc;
+ struct clk *clk;
+
+ psc = kzalloc(sizeof(*psc), GFP_KERNEL);
+ if (!psc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_ddiv_ops;
+ init.flags = 0;
+ init.parent_names = mux_parents;
+ init.num_parents = ARRAY_SIZE(mux_parents);
+
+ psc->reg = reg;
+ psc->bit_idx = bit_idx;
+ psc->lock = &clk_lock;
+ psc->hw.init = &init;
+
+ clk = clk_register(NULL, &psc->hw);
+ if (IS_ERR(clk)) {
+ kfree(psc);
+ return ERR_CAST(clk);
+ }
+
+ return &psc->hw;
}
-int clk_set_rate(struct clk *clk, unsigned long rate)
+static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- if (clk->set_rate)
- return clk->set_rate(clk, rate);
+ struct clk_psc *psc = to_clk_psc(hw);
+ u32 val = __raw_readl(psc->reg);
+ u8 index = (val & psc->mask) >> psc->shift;
- return -EINVAL;
+ if (index >= psc->num_div)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]);
}
-EXPORT_SYMBOL(clk_set_rate);
-long clk_round_rate(struct clk *clk, unsigned long rate)
+static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
- WARN_ON(clk);
- return 0;
+ struct clk_psc *psc = to_clk_psc(hw);
+ unsigned long best = 0, now, maxdiv;
+ int i;
+
+ maxdiv = psc->div[psc->num_div - 1];
+
+ for (i = 0; i < psc->num_div; i++) {
+ if ((rate * psc->div[i]) == *parent_rate)
+ return DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]);
+
+ now = DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]);
+
+ if (is_best(rate, now, best))
+ best = now;
+ }
+
+ if (!best)
+ best = DIV_ROUND_UP_ULL(*parent_rate, maxdiv);
+
+ return best;
}
-EXPORT_SYMBOL(clk_round_rate);
-int clk_set_parent(struct clk *clk, struct clk *parent)
+static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
- WARN_ON(clk);
+ struct clk_psc *psc = to_clk_psc(hw);
+ u32 val = __raw_readl(psc->reg) & ~psc->mask;
+ int i;
+
+ for (i = 0; i < psc->num_div; i++)
+ if (rate == parent_rate / psc->div[i]) {
+ val |= i << psc->shift;
+ break;
+ }
+
+ if (i == psc->num_div)
+ return -EINVAL;
+
+ ep93xx_syscon_swlocked_write(val, psc->reg);
+
return 0;
}
-EXPORT_SYMBOL(clk_set_parent);
-struct clk *clk_get_parent(struct clk *clk)
+static const struct clk_ops ep93xx_div_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .recalc_rate = ep93xx_div_recalc_rate,
+ .round_rate = ep93xx_div_round_rate,
+ .set_rate = ep93xx_div_set_rate,
+};
+
+static struct clk_hw *clk_hw_register_div(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ u8 enable_bit,
+ u8 shift,
+ u8 width,
+ char *clk_divisors,
+ u8 num_div)
{
- return clk->parent;
+ struct clk_init_data init;
+ struct clk_psc *psc;
+ struct clk *clk;
+
+ psc = kzalloc(sizeof(*psc), GFP_KERNEL);
+ if (!psc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &ep93xx_div_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = 1;
+
+ psc->reg = reg;
+ psc->bit_idx = enable_bit;
+ psc->mask = GENMASK(shift + width - 1, shift);
+ psc->shift = shift;
+ psc->div = clk_divisors;
+ psc->num_div = num_div;
+ psc->lock = &clk_lock;
+ psc->hw.init = &init;
+
+ clk = clk_register(NULL, &psc->hw);
+ if (IS_ERR(clk)) {
+ kfree(psc);
+ return ERR_CAST(clk);
+ }
+
+ return &psc->hw;
}
-EXPORT_SYMBOL(clk_get_parent);
+struct ep93xx_gate {
+ unsigned int bit;
+ const char *dev_id;
+ const char *con_id;
+};
-static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
-static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
-static char pclk_divisors[] = { 1, 2, 4, 8 };
+static struct ep93xx_gate ep93xx_uarts[] = {
+ {EP93XX_SYSCON_DEVCFG_U1EN, "apb:uart1", NULL},
+ {EP93XX_SYSCON_DEVCFG_U2EN, "apb:uart2", NULL},
+ {EP93XX_SYSCON_DEVCFG_U3EN, "apb:uart3", NULL},
+};
-/*
- * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS
- */
-static unsigned long calc_pll_rate(u32 config_word)
+static void __init ep93xx_uart_clock_init(void)
{
- unsigned long long rate;
- int i;
+ unsigned int i;
+ struct clk_hw *hw;
+ u32 value;
+ unsigned int clk_uart_div;
- rate = clk_xtali.rate;
- rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */
- rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */
- do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */
- for (i = 0; i < ((config_word >> 16) & 3); i++) /* PS */
- rate >>= 1;
+ value = __raw_readl(EP93XX_SYSCON_PWRCNT);
+ if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
+ clk_uart_div = 1;
+ else
+ clk_uart_div = 2;
- return (unsigned long)rate;
+ hw = clk_hw_register_fixed_factor(NULL, "uart", "xtali", 0, 1, clk_uart_div);
+
+ /* Parent the UART gate clocks to the "uart" clock */
+ for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) {
+ hw = ep93xx_clk_register_gate(ep93xx_uarts[i].dev_id,
+ "uart",
+ EP93XX_SYSCON_DEVCFG,
+ ep93xx_uarts[i].bit);
+
+ clk_hw_register_clkdev(hw, NULL, ep93xx_uarts[i].dev_id);
+ }
}
+static struct ep93xx_gate ep93xx_dmas[] = {
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P0, NULL, "m2p0"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P1, NULL, "m2p1"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P2, NULL, "m2p2"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P3, NULL, "m2p3"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P4, NULL, "m2p4"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P5, NULL, "m2p5"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P6, NULL, "m2p6"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P7, NULL, "m2p7"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P8, NULL, "m2p8"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2P9, NULL, "m2p9"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2M0, NULL, "m2m0"},
+ {EP93XX_SYSCON_PWRCNT_DMA_M2M1, NULL, "m2m1"},
+};
+
static void __init ep93xx_dma_clock_init(void)
{
- clk_m2p0.rate = clk_h.rate;
- clk_m2p1.rate = clk_h.rate;
- clk_m2p2.rate = clk_h.rate;
- clk_m2p3.rate = clk_h.rate;
- clk_m2p4.rate = clk_h.rate;
- clk_m2p5.rate = clk_h.rate;
- clk_m2p6.rate = clk_h.rate;
- clk_m2p7.rate = clk_h.rate;
- clk_m2p8.rate = clk_h.rate;
- clk_m2p9.rate = clk_h.rate;
- clk_m2m0.rate = clk_h.rate;
- clk_m2m1.rate = clk_h.rate;
+ unsigned int i;
+ struct clk_hw *hw;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) {
+ hw = clk_hw_register_gate(NULL, ep93xx_dmas[i].con_id,
+ "hclk", 0,
+ EP93XX_SYSCON_PWRCNT,
+ ep93xx_dmas[i].bit,
+ 0,
+ &clk_lock);
+
+ ret = clk_hw_register_clkdev(hw, ep93xx_dmas[i].con_id, NULL);
+ if (ret)
+ pr_err("%s: failed to register lookup %s\n",
+ __func__, ep93xx_dmas[i].con_id);
+ }
}
static int __init ep93xx_clock_init(void)
{
u32 value;
+ struct clk_hw *hw;
+ unsigned long clk_pll1_rate;
+ unsigned long clk_f_rate;
+ unsigned long clk_h_rate;
+ unsigned long clk_p_rate;
+ unsigned long clk_pll2_rate;
+ unsigned int clk_f_div;
+ unsigned int clk_h_div;
+ unsigned int clk_p_div;
+ unsigned int clk_usb_div;
+ unsigned long clk_spi_div;
+
+ hw = clk_hw_register_fixed_rate(NULL, "xtali", NULL, 0, EP93XX_EXT_CLK_RATE);
+ clk_hw_register_clkdev(hw, NULL, "xtali");
/* Determine the bootloader configured pll1 rate */
value = __raw_readl(EP93XX_SYSCON_CLKSET1);
if (!(value & EP93XX_SYSCON_CLKSET1_NBYP1))
- clk_pll1.rate = clk_xtali.rate;
+ clk_pll1_rate = EP93XX_EXT_CLK_RATE;
else
- clk_pll1.rate = calc_pll_rate(value);
+ clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
+
+ hw = clk_hw_register_fixed_rate(NULL, "pll1", "xtali", 0, clk_pll1_rate);
+ clk_hw_register_clkdev(hw, NULL, "pll1");
/* Initialize the pll1 derived clocks */
- clk_f.rate = clk_pll1.rate / fclk_divisors[(value >> 25) & 0x7];
- clk_h.rate = clk_pll1.rate / hclk_divisors[(value >> 20) & 0x7];
- clk_p.rate = clk_h.rate / pclk_divisors[(value >> 18) & 0x3];
+ clk_f_div = fclk_divisors[(value >> 25) & 0x7];
+ clk_h_div = hclk_divisors[(value >> 20) & 0x7];
+ clk_p_div = pclk_divisors[(value >> 18) & 0x3];
+
+ hw = clk_hw_register_fixed_factor(NULL, "fclk", "pll1", 0, 1, clk_f_div);
+ clk_f_rate = clk_get_rate(hw->clk);
+ hw = clk_hw_register_fixed_factor(NULL, "hclk", "pll1", 0, 1, clk_h_div);
+ clk_h_rate = clk_get_rate(hw->clk);
+ hw = clk_hw_register_fixed_factor(NULL, "pclk", "hclk", 0, 1, clk_p_div);
+ clk_p_rate = clk_get_rate(hw->clk);
+
+ clk_hw_register_clkdev(hw, "apb_pclk", NULL);
+
ep93xx_dma_clock_init();
/* Determine the bootloader configured pll2 rate */
value = __raw_readl(EP93XX_SYSCON_CLKSET2);
if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2))
- clk_pll2.rate = clk_xtali.rate;
+ clk_pll2_rate = EP93XX_EXT_CLK_RATE;
else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN)
- clk_pll2.rate = calc_pll_rate(value);
+ clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
else
- clk_pll2.rate = 0;
+ clk_pll2_rate = 0;
+
+ hw = clk_hw_register_fixed_rate(NULL, "pll2", "xtali", 0, clk_pll2_rate);
+ clk_hw_register_clkdev(hw, NULL, "pll2");
/* Initialize the pll2 derived clocks */
- clk_usb_host.rate = clk_pll2.rate / (((value >> 28) & 0xf) + 1);
+ /*
+ * These four bits set the divide ratio between the PLL2
+ * output and the USB clock.
+ * 0000 - Divide by 1
+ * 0001 - Divide by 2
+ * 0010 - Divide by 3
+ * 0011 - Divide by 4
+ * 0100 - Divide by 5
+ * 0101 - Divide by 6
+ * 0110 - Divide by 7
+ * 0111 - Divide by 8
+ * 1000 - Divide by 9
+ * 1001 - Divide by 10
+ * 1010 - Divide by 11
+ * 1011 - Divide by 12
+ * 1100 - Divide by 13
+ * 1101 - Divide by 14
+ * 1110 - Divide by 15
+ * 1111 - Divide by 1
+ * On power-on-reset these bits are reset to 0000b.
+ */
+ clk_usb_div = (((value >> 28) & 0xf) + 1);
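+ /*
+ * Worked example (hypothetical numbers): with PLL2 at 192 MHz and the
+ * field reading 0b0011, clk_usb_div above is 4 and the USB clock comes
+ * out at 192 MHz / 4 = 48 MHz.
+ */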
+ hw = clk_hw_register_fixed_factor(NULL, "usb_clk", "pll2", 0, 1, clk_usb_div);
+ hw = clk_hw_register_gate(NULL, "ohci-platform",
+ "usb_clk", 0,
+ EP93XX_SYSCON_PWRCNT,
+ EP93XX_SYSCON_PWRCNT_USH_EN,
+ 0,
+ &clk_lock);
+ clk_hw_register_clkdev(hw, NULL, "ohci-platform");
/*
* EP93xx SSP clock rate was doubled in version E2. For more information
* see:
* http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
*/
+ clk_spi_div = 1;
if (ep93xx_chip_revision() < EP93XX_CHIP_REV_E2)
- clk_spi.rate /= 2;
+ clk_spi_div = 2;
+ hw = clk_hw_register_fixed_factor(NULL, "ep93xx-spi.0", "xtali", 0, 1, clk_spi_div);
+ clk_hw_register_clkdev(hw, NULL, "ep93xx-spi.0");
+
+ /* pwm clock */
+ hw = clk_hw_register_fixed_factor(NULL, "pwm_clk", "xtali", 0, 1, 1);
+ clk_hw_register_clkdev(hw, "pwm_clk", NULL);
pr_info("PLL1 running at %ld MHz, PLL2 at %ld MHz\n",
- clk_pll1.rate / 1000000, clk_pll2.rate / 1000000);
+ clk_pll1_rate / 1000000, clk_pll2_rate / 1000000);
pr_info("FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n",
- clk_f.rate / 1000000, clk_h.rate / 1000000,
- clk_p.rate / 1000000);
+ clk_f_rate / 1000000, clk_h_rate / 1000000,
+ clk_p_rate / 1000000);
+
+ ep93xx_uart_clock_init();
+
+ /* touchscreen/adc clock */
+ hw = clk_hw_register_div("ep93xx-adc",
+ "xtali",
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_TSEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_ADIV,
+ 1,
+ adc_divisors,
+ ARRAY_SIZE(adc_divisors));
+
+ clk_hw_register_clkdev(hw, NULL, "ep93xx-adc");
+
+ /* keypad clock */
+ hw = clk_hw_register_div("ep93xx-keypad",
+ "xtali",
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KDIV,
+ 1,
+ adc_divisors,
+ ARRAY_SIZE(adc_divisors));
+
+ clk_hw_register_clkdev(hw, NULL, "ep93xx-keypad");
+
+ /*
+ * On reset both PDIV and VDIV are set to zero. A PDIV of zero disables
+ * the clock and VDIV must not be zero, so set both dividers to their
+ * minimum values here:
+ * ENA - enable the clock divider
+ * PDIV - 00 disables the clock
+ * VDIV - must be at least 2
+ */
+ /* Check and enable video clk registers */
+ value = __raw_readl(EP93XX_SYSCON_VIDCLKDIV);
+ value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+ ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_VIDCLKDIV);
+
+ /* Check and enable i2s clk registers */
+ value = __raw_readl(EP93XX_SYSCON_I2SCLKDIV);
+ value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+ ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_I2SCLKDIV);
+
+ /* video clk */
+ hw = clk_hw_register_ddiv("ep93xx-fb",
+ EP93XX_SYSCON_VIDCLKDIV,
+ EP93XX_SYSCON_CLKDIV_ENABLE);
+
+ clk_hw_register_clkdev(hw, NULL, "ep93xx-fb");
+
+ /* i2s clk */
+ hw = clk_hw_register_ddiv("mclk",
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_CLKDIV_ENABLE);
+
+ clk_hw_register_clkdev(hw, "mclk", "ep93xx-i2s");
+
+ /* i2s sclk */
+#define EP93XX_I2SCLKDIV_SDIV_SHIFT 16
+#define EP93XX_I2SCLKDIV_SDIV_WIDTH 1
+ hw = clk_hw_register_div("sclk",
+ "mclk",
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ EP93XX_I2SCLKDIV_SDIV_SHIFT,
+ EP93XX_I2SCLKDIV_SDIV_WIDTH,
+ sclk_divisors,
+ ARRAY_SIZE(sclk_divisors));
+
+ clk_hw_register_clkdev(hw, "sclk", "ep93xx-i2s");
+
+ /* i2s lrclk */
+#define EP93XX_I2SCLKDIV_LRDIV32_SHIFT 17
+#define EP93XX_I2SCLKDIV_LRDIV32_WIDTH 3
+ hw = clk_hw_register_div("lrclk",
+ "sclk",
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ EP93XX_I2SCLKDIV_LRDIV32_SHIFT,
+ EP93XX_I2SCLKDIV_LRDIV32_WIDTH,
+ lrclk_divisors,
+ ARRAY_SIZE(lrclk_divisors));
+
+ clk_hw_register_clkdev(hw, "lrclk", "ep93xx-i2s");
- clkdev_add_table(clocks, ARRAY_SIZE(clocks));
return 0;
}
postcore_initcall(ep93xx_clock_init);
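For orientation only, here is a minimal consumer-side sketch (not part of the patch) of the common clock framework calls that the converted ep93xx code now relies on; the device pointer and the NULL connection id are hypothetical and simply stand for one of the clkdev lookups registered above:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical helper: grab and start the clock registered for @dev */
	static int ep93xx_example_clk_on(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = clk_get(dev, NULL);	/* resolved via clk_hw_register_clkdev() */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);	/* prepare + enable, as the CCF requires */
		if (ret) {
			clk_put(clk);
			return ret;
		}

		dev_info(dev, "clock rate: %lu Hz\n", clk_get_rate(clk));

		/* Later: clk_disable_unprepare(clk); clk_put(clk); */
		return 0;
	}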
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 4659132a0509..a3b4e843456a 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -214,7 +214,7 @@ static int ep93xx_ohci_power_on(struct platform_device *pdev)
return PTR_ERR(ep93xx_ohci_host_clock);
}
- return clk_enable(ep93xx_ohci_host_clock);
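+ /*
+ * The clock now comes from the common clock framework, so it has to
+ * be prepared as well as enabled before use; clk_prepare_enable()
+ * does both and is typically paired with clk_disable_unprepare()
+ * when the clock is switched off again.
+ */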
+ return clk_prepare_enable(ep93xx_ohci_host_clock);
}
static void ep93xx_ohci_power_off(struct platform_device *pdev)
diff --git a/arch/arm/mach-ep93xx/soc.h b/arch/arm/mach-ep93xx/soc.h
index f2dace1c9154..94ef7f275f94 100644
--- a/arch/arm/mach-ep93xx/soc.h
+++ b/arch/arm/mach-ep93xx/soc.h
@@ -111,19 +111,19 @@
#define EP93XX_SYSCON_PWRCNT EP93XX_SYSCON_REG(0x04)
#define EP93XX_SYSCON_PWRCNT_FIR_EN (1<<31)
#define EP93XX_SYSCON_PWRCNT_UARTBAUD (1<<29)
-#define EP93XX_SYSCON_PWRCNT_USH_EN (1<<28)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 (1<<27)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 (1<<26)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 (1<<25)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 (1<<24)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 (1<<23)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 (1<<22)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 (1<<21)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 (1<<20)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 (1<<19)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 (1<<18)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 (1<<17)
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 (1<<16)
+#define EP93XX_SYSCON_PWRCNT_USH_EN 28
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16
#define EP93XX_SYSCON_HALT EP93XX_SYSCON_REG(0x08)
#define EP93XX_SYSCON_STANDBY EP93XX_SYSCON_REG(0x0c)
#define EP93XX_SYSCON_CLKSET1 EP93XX_SYSCON_REG(0x20)
@@ -139,13 +139,13 @@
#define EP93XX_SYSCON_DEVCFG_GONK (1<<27)
#define EP93XX_SYSCON_DEVCFG_TONG (1<<26)
#define EP93XX_SYSCON_DEVCFG_MONG (1<<25)
-#define EP93XX_SYSCON_DEVCFG_U3EN (1<<24)
+#define EP93XX_SYSCON_DEVCFG_U3EN 24
#define EP93XX_SYSCON_DEVCFG_CPENA (1<<23)
#define EP93XX_SYSCON_DEVCFG_A2ONG (1<<22)
#define EP93XX_SYSCON_DEVCFG_A1ONG (1<<21)
-#define EP93XX_SYSCON_DEVCFG_U2EN (1<<20)
+#define EP93XX_SYSCON_DEVCFG_U2EN 20
#define EP93XX_SYSCON_DEVCFG_EXVC (1<<19)
-#define EP93XX_SYSCON_DEVCFG_U1EN (1<<18)
+#define EP93XX_SYSCON_DEVCFG_U1EN 18
#define EP93XX_SYSCON_DEVCFG_TIN (1<<17)
#define EP93XX_SYSCON_DEVCFG_HC3IN (1<<15)
#define EP93XX_SYSCON_DEVCFG_HC3EN (1<<14)
@@ -163,12 +163,12 @@
#define EP93XX_SYSCON_DEVCFG_KEYS (1<<1)
#define EP93XX_SYSCON_DEVCFG_SHENA (1<<0)
#define EP93XX_SYSCON_VIDCLKDIV EP93XX_SYSCON_REG(0x84)
-#define EP93XX_SYSCON_CLKDIV_ENABLE (1<<15)
+#define EP93XX_SYSCON_CLKDIV_ENABLE 15
#define EP93XX_SYSCON_CLKDIV_ESEL (1<<14)
#define EP93XX_SYSCON_CLKDIV_PSEL (1<<13)
#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8
#define EP93XX_SYSCON_I2SCLKDIV EP93XX_SYSCON_REG(0x8c)
-#define EP93XX_SYSCON_I2SCLKDIV_SENA (1<<31)
+#define EP93XX_SYSCON_I2SCLKDIV_SENA 31
#define EP93XX_SYSCON_I2SCLKDIV_ORIDE (1<<29)
#define EP93XX_SYSCON_I2SCLKDIV_SPOL (1<<19)
#define EP93XX_I2SCLKDIV_SDIV (1 << 16)
@@ -177,9 +177,9 @@
#define EP93XX_I2SCLKDIV_LRDIV128 (2 << 17)
#define EP93XX_I2SCLKDIV_LRDIV_MASK (3 << 17)
#define EP93XX_SYSCON_KEYTCHCLKDIV EP93XX_SYSCON_REG(0x90)
-#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN (1<<31)
-#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV (1<<16)
-#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN (1<<15)
+#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31
+#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16
+#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15
#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV (1<<0)
#define EP93XX_SYSCON_SYSCFG EP93XX_SYSCON_REG(0x9c)
#define EP93XX_SYSCON_SYSCFG_REV_MASK (0xf0000000)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 5a48abac6af4..dd1ae5571f43 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -13,7 +13,6 @@ menuconfig ARCH_EXYNOS
select ARM_GIC
select EXYNOS_IRQ_COMBINER
select COMMON_CLK_SAMSUNG
- select EXYNOS_CHIPID
select EXYNOS_THERMAL
select EXYNOS_PMU
select EXYNOS_SROM
@@ -22,7 +21,6 @@ menuconfig ARCH_EXYNOS
select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5
select HAVE_ARM_SCU if SMP
select HAVE_S3C2410_I2C if I2C
- select HAVE_S3C_RTC if RTC_CLASS
select PINCTRL
select PINCTRL_EXYNOS
select PM_GENERIC_DOMAINS if PM
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index 630a038f4513..58a4228455ce 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -29,6 +29,7 @@
#define COREPOR_RST BIT(5)
#define CORE_RST BIT(4)
#define L2DT_SLP BIT(3)
+#define CORE_MEM_CLAMP BIT(1)
#define CLAMP BIT(0)
#define APC_PWR_GATE_CTL 0x14
@@ -75,6 +76,62 @@ static int scss_release_secondary(unsigned int cpu)
return 0;
}
+static int cortex_a7_release_secondary(unsigned int cpu)
+{
+ int ret = 0;
+ void __iomem *reg;
+ struct device_node *cpu_node, *acc_node;
+ u32 reg_val;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
+
+ acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
+ if (!acc_node) {
+ ret = -ENODEV;
+ goto out_acc;
+ }
+
+ reg = of_iomap(acc_node, 0);
+ if (!reg) {
+ ret = -ENOMEM;
+ goto out_acc_map;
+ }
+
+ /* Put the CPU into reset. */
+ reg_val = CORE_RST | COREPOR_RST | CLAMP | CORE_MEM_CLAMP;
+ writel(reg_val, reg + APCS_CPU_PWR_CTL);
+
+ /* Turn on the BHS and set the BHS_CNT to 16 XO clock cycles */
+ writel(BHS_EN | (0x10 << BHS_CNT_SHIFT), reg + APC_PWR_GATE_CTL);
+ /* Wait for the BHS to settle */
+ udelay(2);
+
+ reg_val &= ~CORE_MEM_CLAMP;
+ writel(reg_val, reg + APCS_CPU_PWR_CTL);
+ reg_val |= L2DT_SLP;
+ writel(reg_val, reg + APCS_CPU_PWR_CTL);
+ udelay(2);
+
+ reg_val = (reg_val | BIT(17)) & ~CLAMP;
+ writel(reg_val, reg + APCS_CPU_PWR_CTL);
+ udelay(2);
+
+ /* Release CPU out of reset and bring it to life. */
+ reg_val &= ~(CORE_RST | COREPOR_RST);
+ writel(reg_val, reg + APCS_CPU_PWR_CTL);
+ reg_val |= CORE_PWRD_UP;
+ writel(reg_val, reg + APCS_CPU_PWR_CTL);
+
+ iounmap(reg);
+out_acc_map:
+ of_node_put(acc_node);
+out_acc:
+ of_node_put(cpu_node);
+ return ret;
+}
+
static int kpssv1_release_secondary(unsigned int cpu)
{
int ret = 0;
@@ -281,6 +338,11 @@ static int msm8660_boot_secondary(unsigned int cpu, struct task_struct *idle)
return qcom_boot_secondary(cpu, scss_release_secondary);
}
+static int cortex_a7_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ return qcom_boot_secondary(cpu, cortex_a7_release_secondary);
+}
+
static int kpssv1_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
return qcom_boot_secondary(cpu, kpssv1_release_secondary);
@@ -315,6 +377,16 @@ static const struct smp_operations smp_msm8660_ops __initconst = {
};
CPU_METHOD_OF_DECLARE(qcom_smp, "qcom,gcc-msm8660", &smp_msm8660_ops);
+static const struct smp_operations qcom_smp_cortex_a7_ops __initconst = {
+ .smp_prepare_cpus = qcom_smp_prepare_cpus,
+ .smp_boot_secondary = cortex_a7_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = qcom_cpu_die,
+#endif
+};
+CPU_METHOD_OF_DECLARE(qcom_smp_msm8226, "qcom,msm8226-smp", &qcom_smp_cortex_a7_ops);
+CPU_METHOD_OF_DECLARE(qcom_smp_msm8916, "qcom,msm8916-smp", &qcom_smp_cortex_a7_ops);
+
static const struct smp_operations qcom_smp_kpssv1_ops __initconst = {
.smp_prepare_cpus = qcom_smp_prepare_cpus,
.smp_boot_secondary = kpssv1_boot_secondary,
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index d644b45bc29d..5a96099af991 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -13,7 +13,6 @@ config ARCH_S5PV210
select COMMON_CLK_SAMSUNG
select GPIOLIB
select HAVE_S3C2410_I2C if I2C
- select HAVE_S3C_RTC if RTC_CLASS
select PINCTRL
select PINCTRL_EXYNOS
select SOC_SAMSUNG
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 96a81967c3bf..1aa8b7073218 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -89,10 +89,8 @@ config ARCH_BRCMSTB
config ARCH_EXYNOS
bool "ARMv8 based Samsung Exynos SoC family"
select COMMON_CLK_SAMSUNG
- select EXYNOS_CHIPID
select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
select EXYNOS_PMU
- select HAVE_S3C_RTC if RTC_CLASS
select PINCTRL
select PINCTRL_EXYNOS
select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index a4cf3d692dc3..3c68e174a113 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -30,7 +30,7 @@ config ARM_INTEGRATOR_LM
found on the ARM Integrator AP (Application Platform)
config BRCMSTB_GISB_ARB
- bool "Broadcom STB GISB bus arbiter"
+ tristate "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
default ARCH_BRCMSTB || BMIPS_GENERIC
help
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 6551286a60cc..4c2f7d61cb9b 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2021 Broadcom
*/
#include <linux/init.h>
@@ -536,6 +536,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .suppress_bind_attrs = true,
},
};
@@ -546,3 +547,7 @@ static int __init brcm_gisb_driver_init(void)
}
module_init(brcm_gisb_driver_init);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB GISB arbiter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 672518741f86..414f29cdedf0 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -15,10 +15,9 @@ static int sun50i_de2_bus_probe(struct platform_device *pdev)
int ret;
ret = sunxi_sram_claim(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't map SRAM to device\n");
of_platform_populate(np, NULL, NULL, &pdev->dev);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 6a8b7fb5be58..54c0ee6dda30 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -6,6 +6,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -17,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
+#include <linux/timekeeping.h>
#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
@@ -51,11 +53,18 @@ struct sysc_address {
struct list_head node;
};
+struct sysc_module {
+ struct sysc *ddata;
+ struct list_head node;
+};
+
struct sysc_soc_info {
unsigned long general_purpose:1;
enum sysc_soc soc;
- struct mutex list_lock; /* disabled modules list lock */
+ struct mutex list_lock; /* disabled and restored modules list lock */
struct list_head disabled_modules;
+ struct list_head restored_modules;
+ struct notifier_block nb;
};
enum sysc_clocks {
@@ -131,6 +140,7 @@ struct sysc {
struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
+ u32 sysconfig;
unsigned int reserved:1;
unsigned int enabled:1;
unsigned int needs_resume:1;
@@ -147,6 +157,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
+static int sysc_reset(struct sysc *ddata);
static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
@@ -223,37 +234,77 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
return sysc_read(ddata, offset);
}
-/* Poll on reset status */
-static int sysc_wait_softreset(struct sysc *ddata)
+static int sysc_poll_reset_sysstatus(struct sysc *ddata)
{
- u32 sysc_mask, syss_done, rstval;
- int syss_offset, error = 0;
-
- if (ddata->cap->regbits->srst_shift < 0)
- return 0;
-
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
- sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+ int error, retries;
+ u32 syss_done, rstval;
if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
syss_done = 0;
else
syss_done = ddata->cfg.syss_mask;
- if (syss_offset >= 0) {
+ if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
rstval, (rstval & ddata->cfg.syss_mask) ==
syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysstatus(ddata);
+ if ((rstval & ddata->cfg.syss_mask) == syss_done)
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+static int sysc_poll_reset_sysconfig(struct sysc *ddata)
+{
+ int error, retries;
+ u32 sysc_mask, rstval;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
rstval, !(rstval & sysc_mask),
100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysconfig(ddata);
+ if (!(rstval & sysc_mask))
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
}
return error;
}
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ int syss_offset, error = 0;
+
+ if (ddata->cap->regbits->srst_shift < 0)
+ return 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (syss_offset >= 0)
+ error = sysc_poll_reset_sysstatus(ddata);
+ else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
+ error = sysc_poll_reset_sysconfig(ddata);
+
+ return error;
+}
+
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -1094,7 +1145,8 @@ set_midle:
best_mode = fls(ddata->cfg.midlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
dev_err(dev, "%s: invalid midlemode\n", __func__);
- return -EINVAL;
+ error = -EINVAL;
+ goto save_context;
}
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
@@ -1112,13 +1164,16 @@ set_autoidle:
sysc_write_sysconfig(ddata, reg);
}
- /* Flush posted write */
- sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ error = 0;
+
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
- return 0;
+ return error;
}
static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
@@ -1175,8 +1230,10 @@ static int sysc_disable_module(struct device *dev)
set_sidle:
/* Set SIDLE mode */
idlemodes = ddata->cfg.sidlemodes;
- if (!idlemodes || regbits->sidle_shift < 0)
- return 0;
+ if (!idlemodes || regbits->sidle_shift < 0) {
+ ret = 0;
+ goto save_context;
+ }
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
best_mode = SYSC_IDLE_FORCE;
@@ -1184,7 +1241,8 @@ set_sidle:
ret = sysc_best_idle_mode(idlemodes, &best_mode);
if (ret) {
dev_err(dev, "%s: invalid sidlemode\n", __func__);
- return ret;
+ ret = -EINVAL;
+ goto save_context;
}
}
@@ -1195,10 +1253,13 @@ set_sidle:
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
- /* Flush posted write */
- sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ ret = 0;
- return 0;
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ return ret;
}
static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
@@ -1336,13 +1397,40 @@ err_allow_idle:
return error;
}
+/*
+ * Checks if device context was lost. Assumes the sysconfig register value
+ * after lost context is different from the configured value. Only works for
+ * enabled devices.
+ *
+ * Eventually we may want to also add support for using the context lost
+ * registers that some SoCs have.
+ */
+static int sysc_check_context(struct sysc *ddata)
+{
+ u32 reg;
+
+ if (!ddata->enabled)
+ return -ENODATA;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ if (reg == ddata->sysconfig)
+ return 0;
+
+ return -EACCES;
+}
+
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
struct device *dev = ddata->dev;
int error;
- /* Disable target module if it is enabled */
if (ddata->enabled) {
+ /* Nothing to do if enabled and context not lost */
+ error = sysc_check_context(ddata);
+ if (!error)
+ return 0;
+
+ /* Disable target module if it is enabled */
error = sysc_runtime_suspend(dev);
if (error)
dev_warn(dev, "reinit suspend failed: %i\n", error);
@@ -1353,6 +1441,15 @@ static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
if (error)
dev_warn(dev, "reinit resume failed: %i\n", error);
+ /* Some modules like am335x gpmc need reset and restore of sysconfig */
+ if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_warn(dev, "reinit reset failed: %i\n", error);
+
+ sysc_write_sysconfig(ddata, ddata->sysconfig);
+ }
+
if (leave_enabled)
return error;
@@ -1442,10 +1539,6 @@ struct sysc_revision_quirk {
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
- SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
- SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1479,7 +1572,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET),
SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
SYSC_QUIRK_GPMC_DEBUG),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
@@ -1515,10 +1611,11 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
- 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_MODULE_QUIRK_OTG),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
- SYSC_QUIRK_REINIT_ON_RESUME),
+ SYSC_QUIRK_REINIT_ON_CTX_LOST),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
/* PRUSS on am3, am4 and am5 */
@@ -1583,6 +1680,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
+ SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
@@ -1874,6 +1972,22 @@ static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
sysc_quirk_rtc(ddata, true);
}
+/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
+static void sysc_module_enable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+
+ sysc_write(ddata, offset, 0);
+}
+
+static void sysc_module_disable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+ u32 val = BIT(0); /* ENABLEFORCE */
+
+ sysc_write(ddata, offset, val);
+}
+
/* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
@@ -1956,6 +2070,11 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
+ ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
+ }
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -2401,6 +2520,78 @@ static struct dev_pm_domain sysc_child_pm_domain = {
}
};
+/* Caller needs to take list_lock if ever used outside of cpu_pm */
+static void sysc_reinit_modules(struct sysc_soc_info *soc)
+{
+ struct sysc_module *module;
+ struct list_head *pos;
+ struct sysc *ddata;
+
+ list_for_each(pos, &sysc_soc->restored_modules) {
+ module = list_entry(pos, struct sysc_module, node);
+ ddata = module->ddata;
+ sysc_reinit_module(ddata, ddata->enabled);
+ }
+}
+
+/**
+ * sysc_context_notifier - optionally reset and restore module after idle
+ * @nb: notifier block
+ * @cmd: unused
+ * @v: unused
+ *
+ * Some interconnect target modules need to be restored, or reset and restored
+ * on the CPU_PM CPU_CLUSTER_PM_EXIT notifier. This is needed at least for am335x
+ * OTG and GPMC target modules even if the modules are unused.
+ */
+static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct sysc_soc_info *soc;
+
+ soc = container_of(nb, struct sysc_soc_info, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ sysc_reinit_modules(soc);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * sysc_add_restored - optionally add reset and restore quirk handling
+ * @ddata: device data
+ */
+static void sysc_add_restored(struct sysc *ddata)
+{
+ struct sysc_module *restored_module;
+
+ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
+ if (!restored_module)
+ return;
+
+ restored_module->ddata = ddata;
+
+ mutex_lock(&sysc_soc->list_lock);
+
+ list_add(&restored_module->node, &sysc_soc->restored_modules);
+
+ if (sysc_soc->nb.notifier_call)
+ goto out_unlock;
+
+ sysc_soc->nb.notifier_call = sysc_context_notifier;
+ cpu_pm_register_notifier(&sysc_soc->nb);
+
+out_unlock:
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
/**
* sysc_legacy_idle_quirk - handle children in omap_device compatible way
* @ddata: device driver data
@@ -2900,12 +3091,14 @@ static int sysc_add_disabled(unsigned long base)
}
/*
- * One time init to detect the booted SoC and disable unavailable features.
+ * One time init to detect the booted SoC, disable unavailable features
+ * and initialize the list for the optional cpu_pm notifier.
+ *
* Note that we initialize static data shared across all ti-sysc instances
* so ddata is only used for SoC type. This can be called from module_init
* once we no longer need to rely on platform data.
*/
-static int sysc_init_soc(struct sysc *ddata)
+static int sysc_init_static_data(struct sysc *ddata)
{
const struct soc_device_attribute *match;
struct ti_sysc_platform_data *pdata;
@@ -2921,6 +3114,7 @@ static int sysc_init_soc(struct sysc *ddata)
mutex_init(&sysc_soc->list_lock);
INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ INIT_LIST_HEAD(&sysc_soc->restored_modules);
sysc_soc->general_purpose = true;
pdata = dev_get_platdata(ddata->dev);
@@ -2985,15 +3179,24 @@ static int sysc_init_soc(struct sysc *ddata)
return 0;
}
-static void sysc_cleanup_soc(void)
+static void sysc_cleanup_static_data(void)
{
+ struct sysc_module *restored_module;
struct sysc_address *disabled_module;
struct list_head *pos, *tmp;
if (!sysc_soc)
return;
+ if (sysc_soc->nb.notifier_call)
+ cpu_pm_unregister_notifier(&sysc_soc->nb);
+
mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
+ restored_module = list_entry(pos, struct sysc_module, node);
+ list_del(pos);
+ kfree(restored_module);
+ }
list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
disabled_module = list_entry(pos, struct sysc_address, node);
list_del(pos);
@@ -3061,7 +3264,7 @@ static int sysc_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
- error = sysc_init_soc(ddata);
+ error = sysc_init_static_data(ddata);
if (error)
return error;
@@ -3159,6 +3362,9 @@ static int sysc_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
+ sysc_add_restored(ddata);
+
return 0;
err:
@@ -3240,7 +3446,7 @@ static void __exit sysc_exit(void)
{
bus_unregister_notifier(&platform_bus_type, &sysc_nb);
platform_driver_unregister(&sysc_driver);
- sysc_cleanup_soc();
+ sysc_cleanup_static_data();
}
module_exit(sysc_exit);
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 334f83e56120..15d6c46c0a47 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -99,7 +99,7 @@ config ARM_MVEBU_V7_CPUIDLE
config ARM_TEGRA_CPUIDLE
bool "CPU Idle Driver for NVIDIA Tegra SoCs"
- depends on ARCH_TEGRA && !ARM64
+ depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
select ARM_CPU_SUSPEND
help
@@ -112,6 +112,7 @@ config ARM_QCOM_SPM_CPUIDLE
select CPU_IDLE_MULTIPLE_DRIVERS
select DT_IDLE_STATES
select QCOM_SCM
+ select QCOM_SPM
help
Select this to enable cpuidle for Qualcomm processors.
The Subsystem Power Manager (SPM) controls low power modes for the
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index c0e7971da2da..01e77913a414 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -18,158 +18,18 @@
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
+#include <soc/qcom/spm.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include "dt_idle_states.h"
-#define MAX_PMIC_DATA 2
-#define MAX_SEQ_DATA 64
-#define SPM_CTL_INDEX 0x7f
-#define SPM_CTL_INDEX_SHIFT 4
-#define SPM_CTL_EN BIT(0)
-
-enum pm_sleep_mode {
- PM_SLEEP_MODE_STBY,
- PM_SLEEP_MODE_RET,
- PM_SLEEP_MODE_SPC,
- PM_SLEEP_MODE_PC,
- PM_SLEEP_MODE_NR,
-};
-
-enum spm_reg {
- SPM_REG_CFG,
- SPM_REG_SPM_CTL,
- SPM_REG_DLY,
- SPM_REG_PMIC_DLY,
- SPM_REG_PMIC_DATA_0,
- SPM_REG_PMIC_DATA_1,
- SPM_REG_VCTL,
- SPM_REG_SEQ_ENTRY,
- SPM_REG_SPM_STS,
- SPM_REG_PMIC_STS,
- SPM_REG_NR,
-};
-
-struct spm_reg_data {
- const u8 *reg_offset;
- u32 spm_cfg;
- u32 spm_dly;
- u32 pmic_dly;
- u32 pmic_data[MAX_PMIC_DATA];
- u8 seq[MAX_SEQ_DATA];
- u8 start_index[PM_SLEEP_MODE_NR];
-};
-
-struct spm_driver_data {
+struct cpuidle_qcom_spm_data {
struct cpuidle_driver cpuidle_driver;
- void __iomem *reg_base;
- const struct spm_reg_data *reg_data;
-};
-
-static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x30,
- [SPM_REG_DLY] = 0x34,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8974, 8084 */
-static const struct spm_reg_data spm_reg_8974_8084_cpu = {
- .reg_offset = spm_reg_offset_v2_1,
- .spm_cfg = 0x1,
- .spm_dly = 0x3C102800,
- .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
- 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
- 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 3,
+ struct spm_driver_data *spm;
};
-/* SPM register data for 8226 */
-static const struct spm_reg_data spm_reg_8226_cpu = {
- .reg_offset = spm_reg_offset_v2_1,
- .spm_cfg = 0x0,
- .spm_dly = 0x3C102800,
- .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
- 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
- 0x80, 0x10, 0x26, 0x30, 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 5,
-};
-
-static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x20,
- [SPM_REG_PMIC_DLY] = 0x24,
- [SPM_REG_PMIC_DATA_0] = 0x28,
- [SPM_REG_PMIC_DATA_1] = 0x2C,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8064 */
-static const struct spm_reg_data spm_reg_8064_cpu = {
- .reg_offset = spm_reg_offset_v1_1,
- .spm_cfg = 0x1F,
- .pmic_dly = 0x02020004,
- .pmic_data[0] = 0x0084009C,
- .pmic_data[1] = 0x00A4001C,
- .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
- 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 2,
-};
-
-static inline void spm_register_write(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- if (drv->reg_data->reg_offset[reg])
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
-}
-
-/* Ensure a guaranteed write, before return */
-static inline void spm_register_write_sync(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- u32 ret;
-
- if (!drv->reg_data->reg_offset[reg])
- return;
-
- do {
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- ret = readl_relaxed(drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- if (ret == val)
- break;
- cpu_relax();
- } while (1);
-}
-
-static inline u32 spm_register_read(struct spm_driver_data *drv,
- enum spm_reg reg)
-{
- return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
-}
-
-static void spm_set_low_power_mode(struct spm_driver_data *drv,
- enum pm_sleep_mode mode)
-{
- u32 start_index;
- u32 ctl_val;
-
- start_index = drv->reg_data->start_index[mode];
-
- ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
- ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
- ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
- ctl_val |= SPM_CTL_EN;
- spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
-}
-
static int qcom_pm_collapse(unsigned long int unused)
{
qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
@@ -201,10 +61,10 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
static int spm_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- struct spm_driver_data *data = container_of(drv, struct spm_driver_data,
- cpuidle_driver);
+ struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data,
+ cpuidle_driver);
- return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data->spm);
}
static struct cpuidle_driver qcom_spm_idle_driver = {
@@ -225,134 +85,92 @@ static const struct of_device_id qcom_idle_state_match[] = {
{ },
};
-static int spm_cpuidle_init(struct cpuidle_driver *drv, int cpu)
+static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
+ struct platform_device *pdev = NULL;
+ struct device_node *cpu_node, *saw_node;
+ struct cpuidle_qcom_spm_data *data = NULL;
int ret;
- memcpy(drv, &qcom_spm_idle_driver, sizeof(*drv));
- drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
- /* Parse idle states from device tree */
- ret = dt_init_idle_driver(drv, qcom_idle_state_match, 1);
- if (ret <= 0)
- return ret ? : -ENODEV;
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ if (!saw_node)
+ return -ENODEV;
- /* We have atleast one power down mode */
- return qcom_scm_set_warm_boot_addr(cpu_resume_arm, drv->cpumask);
-}
+ pdev = of_find_device_by_node(saw_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+ if (!pdev)
+ return -ENODEV;
-static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
- int *spm_cpu)
-{
- struct spm_driver_data *drv = NULL;
- struct device_node *cpu_node, *saw_node;
- int cpu;
- bool found = 0;
+ data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_node = of_cpu_device_node_get(cpu);
- if (!cpu_node)
- continue;
- saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
- found = (saw_node == pdev->dev.of_node);
- of_node_put(saw_node);
- of_node_put(cpu_node);
- if (found)
- break;
- }
+ data->spm = dev_get_drvdata(&pdev->dev);
+ if (!data->spm)
+ return -EINVAL;
- if (found) {
- drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
- if (drv)
- *spm_cpu = cpu;
- }
+ data->cpuidle_driver = qcom_spm_idle_driver;
+ data->cpuidle_driver.cpumask = (struct cpumask *)cpumask_of(cpu);
- return drv;
-}
+ ret = dt_init_idle_driver(&data->cpuidle_driver,
+ qcom_idle_state_match, 1);
+ if (ret <= 0)
+ return ret ? : -ENODEV;
-static const struct of_device_id spm_match_table[] = {
- { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
- .data = &spm_reg_8226_cpu },
- { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
- .data = &spm_reg_8064_cpu },
- { },
-};
+ ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+ return cpuidle_register(&data->cpuidle_driver, NULL);
+}
-static int spm_dev_probe(struct platform_device *pdev)
+static int spm_cpuidle_drv_probe(struct platform_device *pdev)
{
- struct spm_driver_data *drv;
- struct resource *res;
- const struct of_device_id *match_id;
- void __iomem *addr;
int cpu, ret;
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
- drv = spm_get_drv(pdev, &cpu);
- if (!drv)
- return -EINVAL;
- platform_set_drvdata(pdev, drv);
+ for_each_possible_cpu(cpu) {
+ ret = spm_cpuidle_register(&pdev->dev, cpu);
+ if (ret && ret != -ENODEV) {
+ dev_err(&pdev->dev,
+ "Cannot register for CPU%d: %d\n", cpu, ret);
+ }
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drv->reg_base))
- return PTR_ERR(drv->reg_base);
+ return 0;
+}
- match_id = of_match_node(spm_match_table, pdev->dev.of_node);
- if (!match_id)
- return -ENODEV;
+static struct platform_driver spm_cpuidle_driver = {
+ .probe = spm_cpuidle_drv_probe,
+ .driver = {
+ .name = "qcom-spm-cpuidle",
+ .suppress_bind_attrs = true,
+ },
+};
- drv->reg_data = match_id->data;
+static int __init qcom_spm_cpuidle_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
- ret = spm_cpuidle_init(&drv->cpuidle_driver, cpu);
+ ret = platform_driver_register(&spm_cpuidle_driver);
if (ret)
return ret;
- /* Write the SPM sequences first.. */
- addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
- __iowrite32_copy(addr, drv->reg_data->seq,
- ARRAY_SIZE(drv->reg_data->seq) / 4);
-
- /*
- * ..and then the control registers.
- * On some SoC if the control registers are written first and if the
- * CPU was held in reset, the reset signal could trigger the SPM state
- * machine, before the sequences are completely written.
- */
- spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
- spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
- spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
- spm_register_write(drv, SPM_REG_PMIC_DATA_0,
- drv->reg_data->pmic_data[0]);
- spm_register_write(drv, SPM_REG_PMIC_DATA_1,
- drv->reg_data->pmic_data[1]);
-
- /* Set up Standby as the default low power mode */
- spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
-
- return cpuidle_register(&drv->cpuidle_driver, NULL);
-}
-
-static int spm_dev_remove(struct platform_device *pdev)
-{
- struct spm_driver_data *drv = platform_get_drvdata(pdev);
+ pdev = platform_device_register_simple("qcom-spm-cpuidle",
+ -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&spm_cpuidle_driver);
+ return PTR_ERR(pdev);
+ }
- cpuidle_unregister(&drv->cpuidle_driver);
return 0;
}
-
-static struct platform_driver spm_driver = {
- .probe = spm_dev_probe,
- .remove = spm_dev_remove,
- .driver = {
- .name = "saw",
- .of_match_table = spm_match_table,
- },
-};
-
-builtin_platform_driver(spm_driver);
+device_initcall(qcom_spm_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 508bd9f23792..9845629aeb6d 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -337,6 +337,9 @@ static void tegra_cpuidle_setup_tegra114_c7_state(void)
static int tegra_cpuidle_probe(struct platform_device *pdev)
{
+ if (tegra_pmc_get_suspend_mode() == TEGRA_SUSPEND_NOT_READY)
+ return -EPROBE_DEFER;
+
/* LP2 could be disabled in device-tree */
if (tegra_pmc_get_suspend_mode() < TEGRA_SUSPEND_LP2)
tegra_cpuidle_disable_state(TEGRA_CC6);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index c9fb56afbcb4..14f900047ac0 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -167,6 +167,27 @@ struct ffa_drv_info {
static struct ffa_drv_info *drv_info;
+/*
+ * The driver must be able to support all the versions from the earliest
+ * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
+ * The specification states that if firmware supports an FFA implementation
+ * that is incompatible with and at a greater version number than specified
+ * by the caller (FFA_DRIVER_VERSION passed as a parameter to FFA_VERSION),
+ * it must return the NOT_SUPPORTED error code.
+ */
+static u32 ffa_compatible_version_find(u32 version)
+{
+ u16 major = MAJOR_VERSION(version), minor = MINOR_VERSION(version);
+ u16 drv_major = MAJOR_VERSION(FFA_DRIVER_VERSION);
+ u16 drv_minor = MINOR_VERSION(FFA_DRIVER_VERSION);
+
+ if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
+ return version;
+
+ pr_info("Firmware version higher than driver version, downgrading\n");
+ return FFA_DRIVER_VERSION;
+}
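+/*
+ * Example with hypothetical values: if FFA_DRIVER_VERSION is v1.0 and the
+ * firmware reports v1.1, the driver keeps using its own v1.0; a firmware
+ * report of v1.0 (or anything down to FFA_MIN_VERSION) is used as-is.
+ */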
+
static int ffa_version_check(u32 *version)
{
ffa_value_t ver;
@@ -180,15 +201,20 @@ static int ffa_version_check(u32 *version)
return -EOPNOTSUPP;
}
- if (ver.a0 < FFA_MIN_VERSION || ver.a0 > FFA_DRIVER_VERSION) {
- pr_err("Incompatible version %d.%d found\n",
- MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0));
+ if (ver.a0 < FFA_MIN_VERSION) {
+ pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
+ MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0),
+ MAJOR_VERSION(FFA_MIN_VERSION),
+ MINOR_VERSION(FFA_MIN_VERSION));
return -EINVAL;
}
- *version = ver.a0;
- pr_info("Version %d.%d found\n", MAJOR_VERSION(ver.a0),
+ pr_info("Driver version %d.%d\n", MAJOR_VERSION(FFA_DRIVER_VERSION),
+ MINOR_VERSION(FFA_DRIVER_VERSION));
+ pr_info("Firmware version %d.%d found\n", MAJOR_VERSION(ver.a0),
MINOR_VERSION(ver.a0));
+ *version = ffa_compatible_version_find(ver.a0);
+
return 0;
}
@@ -586,6 +612,22 @@ ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
}
+static int
+ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+{
+ /*
+ * Note that upon a successful MEM_LEND request the caller
+ * must ensure that the memory region specified is not accessed
+ * until a successful MEM_RECLAIM call has been made.
+ * On systems with a hypervisor present this will be enforced,
+ * however on systems without a hypervisor the responsibility
+ * falls to the calling kernel driver to prevent access.
+ */
+ if (dev->mode_32bit)
+ return ffa_memory_ops(FFA_MEM_LEND, args);
+
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
+}
+
static const struct ffa_dev_ops ffa_ops = {
.api_version_get = ffa_api_version_get,
.partition_info_get = ffa_partition_info_get,
@@ -593,6 +635,7 @@ static const struct ffa_dev_ops ffa_ops = {
.sync_send_receive = ffa_sync_send_receive,
.memory_reclaim = ffa_memory_reclaim,
.memory_share = ffa_memory_share,
+ .memory_lend = ffa_memory_lend,
};
const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 2ee97bab7440..7db8066b19fd 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -252,7 +252,7 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
break;
default:
pr_err("Unknown SMC convention being used\n");
- return -EINVAL;
+ return false;
}
ret = qcom_scm_call(dev, &desc, &res);
@@ -1348,6 +1348,10 @@ static const struct of_device_id qcom_scm_dt_match[] = {
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
},
+ { .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK)
+ },
{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 3e9fa4b54358..6d66fe03fb6a 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -74,28 +74,36 @@ static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
static const char *get_filename(struct tegra_bpmp *bpmp,
const struct file *file, char *buf, int size)
{
- char root_path_buf[512];
- const char *root_path;
- const char *filename;
+ const char *root_path, *filename = NULL;
+ char *root_path_buf;
size_t root_len;
+ root_path_buf = kzalloc(512, GFP_KERNEL);
+ if (!root_path_buf)
+ goto out;
+
-	root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
-				sizeof(root_path_buf));
+	root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf, 512);
if (IS_ERR(root_path))
- return NULL;
+ goto out;
root_len = strlen(root_path);
filename = dentry_path(file->f_path.dentry, buf, size);
- if (IS_ERR(filename))
- return NULL;
+ if (IS_ERR(filename)) {
+ filename = NULL;
+ goto out;
+ }
- if (strlen(filename) < root_len ||
- strncmp(filename, root_path, root_len))
- return NULL;
+ if (strlen(filename) < root_len || strncmp(filename, root_path, root_len)) {
+ filename = NULL;
+ goto out;
+ }
filename += root_len;
+out:
+ kfree(root_path_buf);
return filename;
}
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
index c32754055c60..c9c830f658c3 100644
--- a/drivers/firmware/tegra/bpmp-tegra210.c
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -162,7 +162,6 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
{
struct platform_device *pdev = to_platform_device(bpmp->dev);
struct tegra210_bpmp *priv;
- struct resource *res;
unsigned int i;
int err;
@@ -172,13 +171,11 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
bpmp->priv = priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+ priv->atomics = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->atomics))
return PTR_ERR(priv->atomics);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+ priv->arb_sema = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->arb_sema))
return PTR_ERR(priv->arb_sema);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 93b40c245f00..5d90d2eb0019 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -11,6 +11,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
@@ -980,8 +981,10 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
struct mtk_dsi *dsi = dev_get_drvdata(dev);
ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
+ return ret;
- return ret;
+ return device_reset_optional(dev);
}
static void mtk_dsi_unbind(struct device *dev, struct device *master,
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 72c0df129d5c..30bff6cb1b8d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -55,8 +55,8 @@ config ATMEL_EBI
SRAMs, ATA devices, etc.
config BRCMSTB_DPFE
- bool "Broadcom STB DPFE driver" if COMPILE_TEST
- default y if ARCH_BRCMSTB
+ tristate "Broadcom STB DPFE driver"
+ default ARCH_BRCMSTB
depends on ARCH_BRCMSTB || COMPILE_TEST
help
This driver provides access to the DPFE interface of Broadcom
@@ -210,6 +210,7 @@ config RENESAS_RPCIF
tristate "Renesas RPC-IF driver"
depends on ARCH_RENESAS || COMPILE_TEST
select REGMAP_MMIO
+ select RESET_CONTROLLER
help
This supports Renesas R-Car Gen3 or RZ/G2 RPC-IF which provides
either SPI host or HyperFlash. You'll have to select individual
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index d062c2f8250f..75a8c38df939 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -263,7 +263,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
if (ret < 0)
- goto err;
+ goto err_unmap_nandirq;
init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
@@ -272,7 +272,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_ifc_ctrl_dev->irq);
- goto err_irq;
+ goto err_unmap_nandirq;
}
if (fsl_ifc_ctrl_dev->nand_irq) {
@@ -281,17 +281,16 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_ifc_ctrl_dev->nand_irq);
- goto err_nandirq;
+ goto err_free_irq;
}
}
return 0;
-err_nandirq:
- free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
- irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
-err_irq:
+err_free_irq:
free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+err_unmap_nandirq:
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
err:
iounmap(fsl_ifc_ctrl_dev->gregs);
diff --git a/drivers/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
index e59ccbd982d0..6cd508478b14 100644
--- a/drivers/memory/jedec_ddr.h
+++ b/drivers/memory/jedec_ddr.h
@@ -112,6 +112,26 @@
#define NUM_DDR_ADDR_TABLE_ENTRIES 11
#define NUM_DDR_TIMING_TABLE_ENTRIES 4
+#define LPDDR2_MANID_SAMSUNG 1
+#define LPDDR2_MANID_QIMONDA 2
+#define LPDDR2_MANID_ELPIDA 3
+#define LPDDR2_MANID_ETRON 4
+#define LPDDR2_MANID_NANYA 5
+#define LPDDR2_MANID_HYNIX 6
+#define LPDDR2_MANID_MOSEL 7
+#define LPDDR2_MANID_WINBOND 8
+#define LPDDR2_MANID_ESMT 9
+#define LPDDR2_MANID_SPANSION 11
+#define LPDDR2_MANID_SST 12
+#define LPDDR2_MANID_ZMOS 13
+#define LPDDR2_MANID_INTEL 14
+#define LPDDR2_MANID_NUMONYX 254
+#define LPDDR2_MANID_MICRON 255
+
+#define LPDDR2_TYPE_S4 0
+#define LPDDR2_TYPE_S2 1
+#define LPDDR2_TYPE_NVM 2
+
/* Structure for DDR addressing info from the JEDEC spec */
struct lpddr2_addressing {
u32 num_banks;
@@ -170,6 +190,33 @@ extern const struct lpddr2_timings
lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+/* Structure of MR8 */
+union lpddr2_basic_config4 {
+ u32 value;
+
+ struct {
+ unsigned int arch_type : 2;
+ unsigned int density : 4;
+ unsigned int io_width : 2;
+ } __packed;
+};
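
As a worked example of the union above (assuming the little-endian bitfield layout the in-tree users rely on), a raw MR8 value of 0x14 decodes as S4, 2 Gbit, x32, using the same "4 >> arch_type", "64 << density" and "32 >> io_width" interpretation the Tegra20 EMC driver applies later in this patch set:

    /* Kernel context assumed for pr_info(); the value 0x14 is illustrative. */
    static void lpddr2_mr8_decode_example(void)
    {
            union lpddr2_basic_config4 cfg = { .value = 0x14 };

            /* arch_type = 0 -> S4, density = 5 -> 64 << 5 = 2048 Mbit, io_width = 0 -> x32 */
            pr_info("S%u, %u Mbit, x%u\n",
                    4 >> cfg.arch_type, 64 << cfg.density, 32 >> cfg.io_width);
    }
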
+
+/*
+ * Structure for information about LPDDR2 chip. All parameters are
+ * matching raw values of standard mode register bitfields or set to
+ * -ENOENT if info unavailable.
+ */
+struct lpddr2_info {
+ int arch_type;
+ int density;
+ int io_width;
+ int manufacturer_id;
+ int revision_id1;
+ int revision_id2;
+};
+
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id);
+
/*
* Structure for timings for LPDDR3 based on LPDDR2 plus additional fields.
* All parameters are in pico seconds(ps) excluding max_freq, min_freq which
diff --git a/drivers/memory/jedec_ddr_data.c b/drivers/memory/jedec_ddr_data.c
index ed601d813175..2cca4fa188f9 100644
--- a/drivers/memory/jedec_ddr_data.c
+++ b/drivers/memory/jedec_ddr_data.c
@@ -131,3 +131,44 @@ const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
.tFAW = 8
};
EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck);
+
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id)
+{
+ switch (manufacturer_id) {
+ case LPDDR2_MANID_SAMSUNG:
+ return "Samsung";
+ case LPDDR2_MANID_QIMONDA:
+ return "Qimonda";
+ case LPDDR2_MANID_ELPIDA:
+ return "Elpida";
+ case LPDDR2_MANID_ETRON:
+ return "Etron";
+ case LPDDR2_MANID_NANYA:
+ return "Nanya";
+ case LPDDR2_MANID_HYNIX:
+ return "Hynix";
+ case LPDDR2_MANID_MOSEL:
+ return "Mosel";
+ case LPDDR2_MANID_WINBOND:
+ return "Winbond";
+ case LPDDR2_MANID_ESMT:
+ return "ESMT";
+ case LPDDR2_MANID_SPANSION:
+ return "Spansion";
+ case LPDDR2_MANID_SST:
+ return "SST";
+ case LPDDR2_MANID_ZMOS:
+ return "ZMOS";
+ case LPDDR2_MANID_INTEL:
+ return "Intel";
+ case LPDDR2_MANID_NUMONYX:
+ return "Numonyx";
+ case LPDDR2_MANID_MICRON:
+ return "Micron";
+ default:
+ break;
+ }
+
+ return "invalid";
+}
+EXPORT_SYMBOL_GPL(lpddr2_jedec_manufacturer);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index c5fb51f73b34..b883dcc0bbfa 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -17,13 +17,33 @@
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <dt-bindings/memory/mtk-memory-port.h>
-/* mt8173 */
-#define SMI_LARB_MMU_EN 0xf00
+/* SMI COMMON */
+#define SMI_L1LEN 0x100
-/* mt8167 */
-#define MT8167_SMI_LARB_MMU_EN 0xfc0
+#define SMI_BUS_SEL 0x220
+#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
+/* All larbs default to MMU0. Only the MMU1 users are specified here. */
+#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+
+#define SMI_M4U_TH 0x234
+#define SMI_FIFO_TH1 0x238
+#define SMI_FIFO_TH2 0x23c
+#define SMI_DCM 0x300
+#define SMI_DUMMY 0x444
-/* mt2701 */
+/* SMI LARB */
+#define SMI_LARB_CMD_THRT_CON 0x24
+#define SMI_LARB_THRT_RD_NU_LMT_MSK GENMASK(7, 4)
+#define SMI_LARB_THRT_RD_NU_LMT (5 << 4)
+
+#define SMI_LARB_SW_FLAG 0x40
+#define SMI_LARB_SW_FLAG_1 0x1
+
+#define SMI_LARB_OSTDL_PORT 0x200
+#define SMI_LARB_OSTDL_PORTx(id) (SMI_LARB_OSTDL_PORT + (((id) & 0x1f) << 2))
+
+/* Below are the MMU enable registers; they differ between SoCs. */
+/* gen1: mt2701 */
#define REG_SMI_SECUR_CON_BASE 0x5c0
/* every register control 8 port, register offset 0x4 */
@@ -41,99 +61,94 @@
/* mt2701 domain should be set to 3 */
#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))
-/* mt2712 */
-#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
-#define F_MMU_EN BIT(0)
-#define BANK_SEL(id) ({ \
+/* gen2: */
+/* mt8167 */
+#define MT8167_SMI_LARB_MMU_EN 0xfc0
+
+/* mt8173 */
+#define MT8173_SMI_LARB_MMU_EN 0xf00
+
+/* general */
+#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
+#define F_MMU_EN BIT(0)
+#define BANK_SEL(id) ({ \
u32 _id = (id) & 0x3; \
(_id << 8 | _id << 10 | _id << 12 | _id << 14); \
})
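
Two worked expansions of the masks above, for reference when reading the per-SoC bus_sel values later in this file (illustrative only):

    /*
     * F_MMU1_LARB(larbid) = 1 << (larbid << 1), so the mt8183 bus_sel of
     * F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) | F_MMU1_LARB(7)
     * works out to 0x4 | 0x10 | 0x400 | 0x4000 = 0x4414.
     *
     * BANK_SEL(2) replicates bank 2 into the four 2-bit fields:
     * (2 << 8) | (2 << 10) | (2 << 12) | (2 << 14) = 0xaa00.
     */
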
-/* SMI COMMON */
-#define SMI_BUS_SEL 0x220
-#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
-/* All are MMU0 defaultly. Only specialize mmu1 here. */
-#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+#define SMI_COMMON_INIT_REGS_NR 6
+#define SMI_LARB_PORT_NR_MAX 32
+
+#define MTK_SMI_FLAG_THRT_UPDATE BIT(0)
+#define MTK_SMI_FLAG_SW_FLAG BIT(1)
+#define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x)))
+
+struct mtk_smi_reg_pair {
+ unsigned int offset;
+ u32 value;
+};
-enum mtk_smi_gen {
+enum mtk_smi_type {
MTK_SMI_GEN1,
- MTK_SMI_GEN2
+ MTK_SMI_GEN2, /* gen2 smi common */
+ MTK_SMI_GEN2_SUB_COMM, /* gen2 smi sub common */
};
+#define MTK_SMI_CLK_NR_MAX 4
+
+/* larbs: Require apb/smi clocks while gals is optional. */
+static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"};
+#define MTK_SMI_LARB_REQ_CLK_NR 2
+#define MTK_SMI_LARB_OPT_CLK_NR 1
+
+/*
+ * common: Require these four clocks in has_gals case. Otherwise, only apb/smi are required.
+ * sub common: Require apb/smi/gals0 clocks in has_gals case. Otherwise, only apb/smi are required.
+ */
+static const char * const mtk_smi_common_clks[] = {"apb", "smi", "gals0", "gals1"};
+#define MTK_SMI_COM_REQ_CLK_NR 2
+#define MTK_SMI_COM_GALS_REQ_CLK_NR MTK_SMI_CLK_NR_MAX
+#define MTK_SMI_SUB_COM_GALS_REQ_CLK_NR 3
+
struct mtk_smi_common_plat {
- enum mtk_smi_gen gen;
- bool has_gals;
- u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+ enum mtk_smi_type type;
+ bool has_gals;
+ u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+
+ const struct mtk_smi_reg_pair *init;
};
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
void (*config_port)(struct device *dev);
unsigned int larb_direct_to_common_mask;
- bool has_gals;
+ unsigned int flags_general;
+ const u8 (*ostd)[SMI_LARB_PORT_NR_MAX];
};
struct mtk_smi {
struct device *dev;
- struct clk *clk_apb, *clk_smi;
- struct clk *clk_gals0, *clk_gals1;
+ unsigned int clk_num;
+ struct clk_bulk_data clks[MTK_SMI_CLK_NR_MAX];
struct clk *clk_async; /*only needed by mt2701*/
union {
void __iomem *smi_ao_base; /* only for gen1 */
void __iomem *base; /* only for gen2 */
};
+ struct device *smi_common_dev; /* for sub common */
const struct mtk_smi_common_plat *plat;
};
struct mtk_smi_larb { /* larb: local arbiter */
struct mtk_smi smi;
void __iomem *base;
- struct device *smi_common_dev;
+ struct device *smi_common_dev; /* common or sub-common dev */
const struct mtk_smi_larb_gen *larb_gen;
int larbid;
u32 *mmu;
unsigned char *bank;
};
-static int mtk_smi_clk_enable(const struct mtk_smi *smi)
-{
- int ret;
-
- ret = clk_prepare_enable(smi->clk_apb);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(smi->clk_smi);
- if (ret)
- goto err_disable_apb;
-
- ret = clk_prepare_enable(smi->clk_gals0);
- if (ret)
- goto err_disable_smi;
-
- ret = clk_prepare_enable(smi->clk_gals1);
- if (ret)
- goto err_disable_gals0;
-
- return 0;
-
-err_disable_gals0:
- clk_disable_unprepare(smi->clk_gals0);
-err_disable_smi:
- clk_disable_unprepare(smi->clk_smi);
-err_disable_apb:
- clk_disable_unprepare(smi->clk_apb);
- return ret;
-}
-
-static void mtk_smi_clk_disable(const struct mtk_smi *smi)
-{
- clk_disable_unprepare(smi->clk_gals1);
- clk_disable_unprepare(smi->clk_gals0);
- clk_disable_unprepare(smi->clk_smi);
- clk_disable_unprepare(smi->clk_apb);
-}
-
int mtk_smi_larb_get(struct device *larbdev)
{
int ret = pm_runtime_resume_and_get(larbdev);
@@ -166,36 +181,16 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
-static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
-{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
- u32 reg;
- int i;
-
- if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
- return;
-
- for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
- reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
- reg |= F_MMU_EN;
- reg |= BANK_SEL(larb->bank[i]);
- writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
- }
-}
-
-static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+static void
+mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-
- writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+ /* Do nothing as the iommu is always enabled. */
}
-static void mtk_smi_larb_config_port_mt8167(struct device *dev)
-{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-
- writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
-}
+static const struct component_ops mtk_smi_larb_component_ops = {
+ .bind = mtk_smi_larb_bind,
+ .unbind = mtk_smi_larb_unbind,
+};
static void mtk_smi_larb_config_port_gen1(struct device *dev)
{
@@ -228,25 +223,94 @@ static void mtk_smi_larb_config_port_gen1(struct device *dev)
}
}
-static void
-mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
+static void mtk_smi_larb_config_port_mt8167(struct device *dev)
{
- /* Do nothing as the iommu is always enabled. */
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
}
-static const struct component_ops mtk_smi_larb_component_ops = {
- .bind = mtk_smi_larb_bind,
- .unbind = mtk_smi_larb_unbind,
-};
+static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
- /* mt8173 do not need the port in larb */
- .config_port = mtk_smi_larb_config_port_mt8173,
-};
+ writel(*larb->mmu, larb->base + MT8173_SMI_LARB_MMU_EN);
+}
-static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
- /* mt8167 do not need the port in larb */
- .config_port = mtk_smi_larb_config_port_mt8167,
+static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ u32 reg, flags_general = larb->larb_gen->flags_general;
+ const u8 *larbostd = larb->larb_gen->ostd[larb->larbid];
+ int i;
+
+ if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
+ return;
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_THRT_UPDATE)) {
+ reg = readl_relaxed(larb->base + SMI_LARB_CMD_THRT_CON);
+ reg &= ~SMI_LARB_THRT_RD_NU_LMT_MSK;
+ reg |= SMI_LARB_THRT_RD_NU_LMT;
+ writel_relaxed(reg, larb->base + SMI_LARB_CMD_THRT_CON);
+ }
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_SW_FLAG))
+ writel_relaxed(SMI_LARB_SW_FLAG_1, larb->base + SMI_LARB_SW_FLAG);
+
+ for (i = 0; i < SMI_LARB_PORT_NR_MAX && larbostd && !!larbostd[i]; i++)
+ writel_relaxed(larbostd[i], larb->base + SMI_LARB_OSTDL_PORTx(i));
+
+ for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
+ reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
+ reg |= F_MMU_EN;
+ reg |= BANK_SEL(larb->bank[i]);
+ writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
+ }
+}
+
+static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
+ [1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
+ [2] = {0x12, 0x12, 0x12, 0x12, 0x0a,}, /* ... */
+ [3] = {0x12, 0x12, 0x12, 0x12, 0x28, 0x28, 0x0a,},
+ [4] = {0x06, 0x01, 0x17, 0x06, 0x0a,},
+ [5] = {0x06, 0x01, 0x17, 0x06, 0x06, 0x01, 0x06, 0x0a,},
+ [6] = {0x06, 0x01, 0x06, 0x0a,},
+ [7] = {0x0c, 0x0c, 0x12,},
+ [8] = {0x0c, 0x0c, 0x12,},
+ [9] = {0x0a, 0x08, 0x04, 0x06, 0x01, 0x01, 0x10, 0x18, 0x11, 0x0a,
+ 0x08, 0x04, 0x11, 0x06, 0x02, 0x06, 0x01, 0x11, 0x11, 0x06,},
+ [10] = {0x18, 0x08, 0x01, 0x01, 0x20, 0x12, 0x18, 0x06, 0x05, 0x10,
+ 0x08, 0x08, 0x10, 0x08, 0x08, 0x18, 0x0c, 0x09, 0x0b, 0x0d,
+ 0x0d, 0x06, 0x10, 0x10,},
+ [11] = {0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x01, 0x01, 0x01, 0x01,},
+ [12] = {0x09, 0x09, 0x05, 0x05, 0x0c, 0x18, 0x02, 0x02, 0x04, 0x02,},
+ [13] = {0x02, 0x02, 0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x08, 0x01,},
+ [14] = {0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x16, 0x01, 0x16, 0x01,
+ 0x01, 0x02, 0x02, 0x08, 0x02,},
+ [15] = {},
+ [16] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [17] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [18] = {0x12, 0x06, 0x12, 0x06,},
+ [19] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [20] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [21] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [22] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [23] = {0x18, 0x01,},
+ [24] = {0x01, 0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x01,
+ 0x01, 0x01,},
+ [25] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [26] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [27] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [28] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
};
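
Note how this table interacts with the OSTD loop in mtk_smi_larb_config_port_gen2_general() above; a worked illustration:

    /*
     * The loop stops at the first zero entry, so with
     * larbostd = mtk_smi_larb_mt8195_ostd[7] = {0x0c, 0x0c, 0x12,}
     * only SMI_LARB_OSTDL_PORTx(0)..(2) are written, and an all-zero
     * row such as larb15's programs nothing.
     */
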
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
@@ -269,8 +333,17 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
/* DUMMY | IPU0 | IPU1 | CCU | MDLA */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
+	/* mt8167 does not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8167,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
+	/* mt8173 does not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8173,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
- .has_gals = true,
.config_port = mtk_smi_larb_config_port_gen2_general,
.larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
/* IPU0 | IPU1 | CCU */
@@ -280,99 +353,114 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG,
+ .ostd = mtk_smi_larb_mt8195_ostd,
+};
+
static const struct of_device_id mtk_smi_larb_of_ids[] = {
- {
- .compatible = "mediatek,mt8167-smi-larb",
- .data = &mtk_smi_larb_mt8167
- },
- {
- .compatible = "mediatek,mt8173-smi-larb",
- .data = &mtk_smi_larb_mt8173
- },
- {
- .compatible = "mediatek,mt2701-smi-larb",
- .data = &mtk_smi_larb_mt2701
- },
- {
- .compatible = "mediatek,mt2712-smi-larb",
- .data = &mtk_smi_larb_mt2712
- },
- {
- .compatible = "mediatek,mt6779-smi-larb",
- .data = &mtk_smi_larb_mt6779
- },
- {
- .compatible = "mediatek,mt8183-smi-larb",
- .data = &mtk_smi_larb_mt8183
- },
- {
- .compatible = "mediatek,mt8192-smi-larb",
- .data = &mtk_smi_larb_mt8192
- },
+ {.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701},
+ {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
+ {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
+ {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
+ {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
+ {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
+ {.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192},
+ {.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
};
-static int mtk_smi_larb_probe(struct platform_device *pdev)
+static int mtk_smi_device_link_common(struct device *dev, struct device **com_dev)
{
- struct mtk_smi_larb *larb;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct device_node *smi_node;
- struct platform_device *smi_pdev;
+ struct platform_device *smi_com_pdev;
+ struct device_node *smi_com_node;
+ struct device *smi_com_dev;
struct device_link *link;
- larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
- if (!larb)
- return -ENOMEM;
-
- larb->larb_gen = of_device_get_match_data(dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- larb->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(larb->base))
- return PTR_ERR(larb->base);
-
- larb->smi.clk_apb = devm_clk_get(dev, "apb");
- if (IS_ERR(larb->smi.clk_apb))
- return PTR_ERR(larb->smi.clk_apb);
-
- larb->smi.clk_smi = devm_clk_get(dev, "smi");
- if (IS_ERR(larb->smi.clk_smi))
- return PTR_ERR(larb->smi.clk_smi);
-
- if (larb->larb_gen->has_gals) {
- /* The larbs may still haven't gals even if the SoC support.*/
- larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
- if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
- larb->smi.clk_gals0 = NULL;
- else if (IS_ERR(larb->smi.clk_gals0))
- return PTR_ERR(larb->smi.clk_gals0);
- }
- larb->smi.dev = dev;
-
- smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
- if (!smi_node)
+ smi_com_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
+ if (!smi_com_node)
return -EINVAL;
- smi_pdev = of_find_device_by_node(smi_node);
- of_node_put(smi_node);
- if (smi_pdev) {
- if (!platform_get_drvdata(smi_pdev))
+ smi_com_pdev = of_find_device_by_node(smi_com_node);
+ of_node_put(smi_com_node);
+ if (smi_com_pdev) {
+		/* smi-common is the supplier; make sure it is ready before linking */
+ if (!platform_get_drvdata(smi_com_pdev))
return -EPROBE_DEFER;
- larb->smi_common_dev = &smi_pdev->dev;
- link = device_link_add(dev, larb->smi_common_dev,
+ smi_com_dev = &smi_com_pdev->dev;
+ link = device_link_add(dev, smi_com_dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link) {
dev_err(dev, "Unable to link smi-common dev\n");
return -ENODEV;
}
+ *com_dev = smi_com_dev;
} else {
dev_err(dev, "Failed to get the smi_common device\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int mtk_smi_dts_clk_init(struct device *dev, struct mtk_smi *smi,
+ const char * const clks[],
+ unsigned int clk_nr_required,
+ unsigned int clk_nr_optional)
+{
+ int i, ret;
+
+ for (i = 0; i < clk_nr_required; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get(dev, clk_nr_required, smi->clks);
+ if (ret)
+ return ret;
+
+ for (i = clk_nr_required; i < clk_nr_required + clk_nr_optional; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get_optional(dev, clk_nr_optional,
+ smi->clks + clk_nr_required);
+ smi->clk_num = clk_nr_required + clk_nr_optional;
+ return ret;
+}
+
+static int mtk_smi_larb_probe(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larb;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
+ if (!larb)
+ return -ENOMEM;
+
+ larb->larb_gen = of_device_get_match_data(dev);
+ larb->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(larb->base))
+ return PTR_ERR(larb->base);
+
+ ret = mtk_smi_dts_clk_init(dev, &larb->smi, mtk_smi_larb_clks,
+ MTK_SMI_LARB_REQ_CLK_NR, MTK_SMI_LARB_OPT_CLK_NR);
+ if (ret)
+ return ret;
+
+ larb->smi.dev = dev;
+
+ ret = mtk_smi_device_link_common(dev, &larb->smi_common_dev);
+ if (ret < 0)
+ return ret;
pm_runtime_enable(dev);
platform_set_drvdata(pdev, larb);
- return component_add(dev, &mtk_smi_larb_component_ops);
+ ret = component_add(dev, &mtk_smi_larb_component_ops);
+ if (ret)
+ goto err_pm_disable;
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
+ return ret;
}
static int mtk_smi_larb_remove(struct platform_device *pdev)
@@ -391,11 +479,9 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
int ret;
- ret = mtk_smi_clk_enable(&larb->smi);
- if (ret < 0) {
- dev_err(dev, "Failed to enable clock(%d).\n", ret);
+ ret = clk_bulk_prepare_enable(larb->smi.clk_num, larb->smi.clks);
+ if (ret < 0)
return ret;
- }
/* Configure the basic setting for this larb */
larb_gen->config_port(dev);
@@ -407,7 +493,7 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
- mtk_smi_clk_disable(&larb->smi);
+ clk_bulk_disable_unprepare(larb->smi.clk_num, larb->smi.clks);
return 0;
}
@@ -427,64 +513,75 @@ static struct platform_driver mtk_smi_larb_driver = {
}
};
+static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = {
+ {SMI_L1LEN, 0xb},
+ {SMI_M4U_TH, 0xe100e10},
+ {SMI_FIFO_TH1, 0x506090a},
+ {SMI_FIFO_TH2, 0x506090a},
+ {SMI_DCM, 0x4f1},
+ {SMI_DUMMY, 0x1},
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
- .gen = MTK_SMI_GEN1,
+ .type = MTK_SMI_GEN1,
};
static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
};
static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
- .gen = MTK_SMI_GEN2,
- .has_gals = true,
- .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
- F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
};
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
.has_gals = true,
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
F_MMU1_LARB(7),
};
static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
.has_gals = true,
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
F_MMU1_LARB(6),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vdo = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(3) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vpp = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_sub_common_mt8195 = {
+ .type = MTK_SMI_GEN2_SUB_COMM,
+ .has_gals = true,
+};
+
static const struct of_device_id mtk_smi_common_of_ids[] = {
- {
- .compatible = "mediatek,mt8173-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt8167-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt2701-smi-common",
- .data = &mtk_smi_common_gen1,
- },
- {
- .compatible = "mediatek,mt2712-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt6779-smi-common",
- .data = &mtk_smi_common_mt6779,
- },
- {
- .compatible = "mediatek,mt8183-smi-common",
- .data = &mtk_smi_common_mt8183,
- },
- {
- .compatible = "mediatek,mt8192-smi-common",
- .data = &mtk_smi_common_mt8192,
- },
+ {.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1},
+ {.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
+ {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
+ {.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192},
+ {.compatible = "mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo},
+ {.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp},
+ {.compatible = "mediatek,mt8195-smi-sub-common", .data = &mtk_smi_sub_common_mt8195},
{}
};
@@ -492,8 +589,7 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_smi *common;
- struct resource *res;
- int ret;
+ int ret, clk_required = MTK_SMI_COM_REQ_CLK_NR;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
@@ -501,23 +597,15 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
common->dev = dev;
common->plat = of_device_get_match_data(dev);
- common->clk_apb = devm_clk_get(dev, "apb");
- if (IS_ERR(common->clk_apb))
- return PTR_ERR(common->clk_apb);
-
- common->clk_smi = devm_clk_get(dev, "smi");
- if (IS_ERR(common->clk_smi))
- return PTR_ERR(common->clk_smi);
-
if (common->plat->has_gals) {
- common->clk_gals0 = devm_clk_get(dev, "gals0");
- if (IS_ERR(common->clk_gals0))
- return PTR_ERR(common->clk_gals0);
-
- common->clk_gals1 = devm_clk_get(dev, "gals1");
- if (IS_ERR(common->clk_gals1))
- return PTR_ERR(common->clk_gals1);
+ if (common->plat->type == MTK_SMI_GEN2)
+ clk_required = MTK_SMI_COM_GALS_REQ_CLK_NR;
+ else if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ clk_required = MTK_SMI_SUB_COM_GALS_REQ_CLK_NR;
}
+ ret = mtk_smi_dts_clk_init(dev, common, mtk_smi_common_clks, clk_required, 0);
+ if (ret)
+ return ret;
/*
* for mtk smi gen 1, we need to get the ao(always on) base to config
@@ -525,9 +613,8 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
* clock into emi clock domain, but for mtk smi gen2, there's no smi ao
* base.
*/
- if (common->plat->gen == MTK_SMI_GEN1) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- common->smi_ao_base = devm_ioremap_resource(dev, res);
+ if (common->plat->type == MTK_SMI_GEN1) {
+ common->smi_ao_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(common->smi_ao_base))
return PTR_ERR(common->smi_ao_base);
@@ -539,11 +626,18 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (ret)
return ret;
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- common->base = devm_ioremap_resource(dev, res);
+ common->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(common->base))
return PTR_ERR(common->base);
}
+
+ /* link its smi-common if this is smi-sub-common */
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM) {
+ ret = mtk_smi_device_link_common(dev, &common->smi_common_dev);
+ if (ret < 0)
+ return ret;
+ }
+
pm_runtime_enable(dev);
platform_set_drvdata(pdev, common);
return 0;
@@ -551,6 +645,10 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
static int mtk_smi_common_remove(struct platform_device *pdev)
{
+ struct mtk_smi *common = dev_get_drvdata(&pdev->dev);
+
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -558,17 +656,21 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
static int __maybe_unused mtk_smi_common_resume(struct device *dev)
{
struct mtk_smi *common = dev_get_drvdata(dev);
- u32 bus_sel = common->plat->bus_sel;
- int ret;
+ const struct mtk_smi_reg_pair *init = common->plat->init;
+ u32 bus_sel = common->plat->bus_sel; /* default is 0 */
+ int ret, i;
- ret = mtk_smi_clk_enable(common);
- if (ret) {
- dev_err(common->dev, "Failed to enable clock(%d).\n", ret);
+ ret = clk_bulk_prepare_enable(common->clk_num, common->clks);
+ if (ret)
return ret;
- }
- if (common->plat->gen == MTK_SMI_GEN2 && bus_sel)
- writel(bus_sel, common->base + SMI_BUS_SEL);
+ if (common->plat->type != MTK_SMI_GEN2)
+ return 0;
+
+ for (i = 0; i < SMI_COMMON_INIT_REGS_NR && init && init[i].offset; i++)
+ writel_relaxed(init[i].value, common->base + init[i].offset);
+
+ writel(bus_sel, common->base + SMI_BUS_SEL);
return 0;
}
@@ -576,7 +678,7 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
{
struct mtk_smi *common = dev_get_drvdata(dev);
- mtk_smi_clk_disable(common);
+ clk_bulk_disable_unprepare(common->clk_num, common->clks);
return 0;
}
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index d9f5437d3bce..b94408954d85 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -298,3 +298,90 @@ default_timings:
return NULL;
}
EXPORT_SYMBOL(of_lpddr3_get_ddr_timings);
+
+/**
+ * of_lpddr2_get_info() - extracts information about the lpddr2 chip.
+ * @np: Pointer to device tree node containing lpddr2 info
+ * @dev: Device requesting info
+ *
+ * Populates the lpddr2_info structure by extracting data from the device
+ * tree node and returns a pointer to the populated structure. Returns NULL
+ * if an error occurred while populating. If a property is missing from the
+ * device tree, the corresponding value is set to -ENOENT.
+ */
+const struct lpddr2_info
+*of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ struct lpddr2_info *ret_info, info = {};
+ struct property *prop;
+ const char *cp;
+ int err;
+
+ err = of_property_read_u32(np, "revision-id1", &info.revision_id1);
+ if (err)
+ info.revision_id1 = -ENOENT;
+
+ err = of_property_read_u32(np, "revision-id2", &info.revision_id2);
+ if (err)
+ info.revision_id2 = -ENOENT;
+
+ err = of_property_read_u32(np, "io-width", &info.io_width);
+ if (err)
+ return NULL;
+
+ info.io_width = 32 / info.io_width - 1;
+
+ err = of_property_read_u32(np, "density", &info.density);
+ if (err)
+ return NULL;
+
+ info.density = ffs(info.density) - 7;
+
+ if (of_device_is_compatible(np, "jedec,lpddr2-s4"))
+ info.arch_type = LPDDR2_TYPE_S4;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-s2"))
+ info.arch_type = LPDDR2_TYPE_S2;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-nvm"))
+ info.arch_type = LPDDR2_TYPE_NVM;
+ else
+ return NULL;
+
+ prop = of_find_property(np, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp)) {
+
+#define OF_LPDDR2_VENDOR_CMP(compat, ID) \
+ if (!of_compat_cmp(cp, compat ",", strlen(compat ","))) { \
+ info.manufacturer_id = LPDDR2_MANID_##ID; \
+ break; \
+ }
+
+ OF_LPDDR2_VENDOR_CMP("samsung", SAMSUNG)
+ OF_LPDDR2_VENDOR_CMP("qimonda", QIMONDA)
+ OF_LPDDR2_VENDOR_CMP("elpida", ELPIDA)
+ OF_LPDDR2_VENDOR_CMP("etron", ETRON)
+ OF_LPDDR2_VENDOR_CMP("nanya", NANYA)
+ OF_LPDDR2_VENDOR_CMP("hynix", HYNIX)
+ OF_LPDDR2_VENDOR_CMP("mosel", MOSEL)
+ OF_LPDDR2_VENDOR_CMP("winbond", WINBOND)
+ OF_LPDDR2_VENDOR_CMP("esmt", ESMT)
+ OF_LPDDR2_VENDOR_CMP("spansion", SPANSION)
+ OF_LPDDR2_VENDOR_CMP("sst", SST)
+ OF_LPDDR2_VENDOR_CMP("zmos", ZMOS)
+ OF_LPDDR2_VENDOR_CMP("intel", INTEL)
+ OF_LPDDR2_VENDOR_CMP("numonyx", NUMONYX)
+ OF_LPDDR2_VENDOR_CMP("micron", MICRON)
+
+#undef OF_LPDDR2_VENDOR_CMP
+ }
+
+ if (!info.manufacturer_id)
+ info.manufacturer_id = -ENOENT;
+
+ ret_info = devm_kzalloc(dev, sizeof(*ret_info), GFP_KERNEL);
+ if (ret_info)
+ *ret_info = info;
+
+ return ret_info;
+}
+EXPORT_SYMBOL(of_lpddr2_get_info);
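
The io-width and density conversions above map the raw devicetree values onto the MR8-style encodings from jedec_ddr.h; a few worked values (illustrative):

    /*
     * io-width = 32   -> 32 / 32 - 1   = 0  (x32)
     * io-width = 16   -> 32 / 16 - 1   = 1  (x16)
     * density  = 64   -> ffs(64) - 7   = 0  (64 Mbit)
     * density  = 2048 -> ffs(2048) - 7 = 5  (2 Gbit, i.e. 64 << 5 Mbit)
     */
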
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index 4a99b232ab0a..1c4e47fede8a 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -20,6 +20,9 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
const struct lpddr3_timings *
of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
struct device *dev, u32 device_type, u32 *nr_frequencies);
+
+const struct lpddr2_info *of_lpddr2_get_info(struct device_node *np,
+ struct device *dev);
#else
static inline const struct lpddr2_min_tck
*of_get_min_tck(struct device_node *np, struct device *dev)
@@ -46,6 +49,12 @@ static inline const struct lpddr3_timings
{
return NULL;
}
+
+static inline const struct lpddr2_info
+ *of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
#endif /* CONFIG_OF && CONFIG_DDR */
#endif /* __LINUX_MEMORY_OF_REG_ */
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 45eed659b0c6..7435baad0007 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -160,10 +160,61 @@ static const struct regmap_access_table rpcif_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
};
+
+/*
+ * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed
+ * with the proper width. Requires SMENR_SPIDE to be set correctly beforehand!
+ */
+static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct rpcif *rpc = context;
+
+ if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
+ u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
+
+ if (spide == 0x8) {
+ *val = readb(rpc->base + reg);
+ return 0;
+ } else if (spide == 0xC) {
+ *val = readw(rpc->base + reg);
+ return 0;
+ } else if (spide != 0xF) {
+ return -EILSEQ;
+ }
+ }
+
+ *val = readl(rpc->base + reg);
+ return 0;
+}
+
+static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct rpcif *rpc = context;
+
+ if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
+ u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
+
+ if (spide == 0x8) {
+ writeb(val, rpc->base + reg);
+ return 0;
+ } else if (spide == 0xC) {
+ writew(val, rpc->base + reg);
+ return 0;
+ } else if (spide != 0xF) {
+ return -EILSEQ;
+ }
+ }
+
+ writel(val, rpc->base + reg);
+ return 0;
+}
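
Put differently, the two accessors above honour the following SPIDE encodings for SMRDR0/SMWDR0, and reject any other SPIDE value with -EILSEQ:

    /*
     * SPIDE 0x8 -> 8-bit  readb()/writeb()
     * SPIDE 0xC -> 16-bit readw()/writew()
     * SPIDE 0xF -> 32-bit readl()/writel()
     */
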
+
static const struct regmap_config rpcif_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .reg_read = rpcif_reg_read,
+ .reg_write = rpcif_reg_write,
.fast_io = true,
.max_register = RPCIF_PHYINT,
.volatile_table = &rpcif_volatile_table,
@@ -173,17 +224,15 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
- void __iomem *base;
rpc->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ rpc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rpc->base))
+ return PTR_ERR(rpc->base);
- rpc->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &rpcif_regmap_config);
+ rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config);
if (IS_ERR(rpc->regmap)) {
dev_err(&pdev->dev,
"failed to init regmap for rpcif, error %ld\n",
@@ -354,20 +403,16 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
nbytes = op->data.nbytes;
rpc->xferlen = nbytes;
- rpc->enable |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)) |
- RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
+ rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
}
}
EXPORT_SYMBOL(rpcif_prepare);
int rpcif_manual_xfer(struct rpcif *rpc)
{
- u32 smenr, smcr, pos = 0, max = 4;
+ u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
int ret = 0;
- if (rpc->bus_size == 2)
- max = 8;
-
pm_runtime_get_sync(rpc->dev);
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
@@ -378,37 +423,36 @@ int rpcif_manual_xfer(struct rpcif *rpc)
regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option);
regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy);
regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr);
+ regmap_write(rpc->regmap, RPCIF_SMADR, rpc->smadr);
smenr = rpc->enable;
switch (rpc->dir) {
case RPCIF_DATA_OUT:
while (pos < rpc->xferlen) {
- u32 nbytes = rpc->xferlen - pos;
- u32 data[2];
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2];
smcr = rpc->smcr | RPCIF_SMCR_SPIE;
- if (nbytes > max) {
- nbytes = max;
+
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
+ if (bytes_left > nbytes)
smcr |= RPCIF_SMCR_SSLKP;
- }
+
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
+ regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
memcpy(data, rpc->buffer + pos, nbytes);
- if (nbytes > 4) {
+ if (nbytes == 8) {
regmap_write(rpc->regmap, RPCIF_SMWDR1,
data[0]);
regmap_write(rpc->regmap, RPCIF_SMWDR0,
data[1]);
- } else if (nbytes > 2) {
+ } else {
regmap_write(rpc->regmap, RPCIF_SMWDR0,
data[0]);
- } else {
- regmap_write(rpc->regmap, RPCIF_SMWDR0,
- data[0] << 16);
}
- regmap_write(rpc->regmap, RPCIF_SMADR,
- rpc->smadr + pos);
- regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
ret = wait_msg_xfer_end(rpc);
if (ret)
@@ -448,14 +492,16 @@ int rpcif_manual_xfer(struct rpcif *rpc)
break;
}
while (pos < rpc->xferlen) {
- u32 nbytes = rpc->xferlen - pos;
- u32 data[2];
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2];
- if (nbytes > max)
- nbytes = max;
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
regmap_write(rpc->regmap, RPCIF_SMADR,
rpc->smadr + pos);
+ smenr &= ~RPCIF_SMENR_SPIDE(0xF);
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR,
rpc->smcr | RPCIF_SMCR_SPIE);
@@ -463,18 +509,14 @@ int rpcif_manual_xfer(struct rpcif *rpc)
if (ret)
goto err_out;
- if (nbytes > 4) {
+ if (nbytes == 8) {
regmap_read(rpc->regmap, RPCIF_SMRDR1,
&data[0]);
regmap_read(rpc->regmap, RPCIF_SMRDR0,
&data[1]);
- } else if (nbytes > 2) {
- regmap_read(rpc->regmap, RPCIF_SMRDR0,
- &data[0]);
- } else {
+ } else {
regmap_read(rpc->regmap, RPCIF_SMRDR0,
&data[0]);
- data[0] >>= 16;
}
memcpy(rpc->buffer + pos, data, nbytes);
@@ -502,6 +544,48 @@ err_out:
}
EXPORT_SYMBOL(rpcif_manual_xfer);
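
The chunking logic introduced above only ever produces transfer lengths of 1, 2, 4 or 8 bytes so that every chunk has a matching SPIDE setting; a few worked cases, assuming max = 8 for a 2-byte-wide bus:

    /*
     * bytes_left = 20 -> nbytes = 8 (full chunk; SSLKP stays set on writes)
     * bytes_left = 5  -> nbytes = 1 << ilog2(5) = 4
     * bytes_left = 3  -> nbytes = 1 << ilog2(3) = 2
     * bytes_left = 1  -> nbytes = 1 << ilog2(1) = 1
     */
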
+static void memcpy_fromio_readw(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ const int maxw = (IS_ENABLED(CONFIG_64BIT)) ? 8 : 4;
+ u8 buf[2];
+
+ if (count && ((unsigned long)from & 1)) {
+ *(u16 *)buf = __raw_readw((void __iomem *)((unsigned long)from & ~1));
+ *(u8 *)to = buf[1];
+ from++;
+ to++;
+ count--;
+ }
+ while (count >= 2 && !IS_ALIGNED((unsigned long)from, maxw)) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ while (count >= maxw) {
+#ifdef CONFIG_64BIT
+ *(u64 *)to = __raw_readq(from);
+#else
+ *(u32 *)to = __raw_readl(from);
+#endif
+ from += maxw;
+ to += maxw;
+ count -= maxw;
+ }
+ while (count >= 2) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ if (count) {
+ *(u16 *)buf = __raw_readw(from);
+ *(u8 *)to = buf[0];
+ }
+}
+
ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
{
loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1);
@@ -523,7 +607,10 @@ ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
- memcpy_fromio(buf, rpc->dirmap + from, len);
+ if (rpc->bus_size == 2)
+ memcpy_fromio_readw(buf, rpc->dirmap + from, len);
+ else
+ memcpy_fromio(buf, rpc->dirmap + from, len);
pm_runtime_put(rpc->dev);
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 8e240f078afc..7fb70f573031 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -14,11 +14,12 @@ config EXYNOS5422_DMC
depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
depends on (PM_DEVFREQ && PM_DEVFREQ_EVENT)
help
- This adds driver for Exynos5422 DMC (Dynamic Memory Controller).
- The driver provides support for Dynamic Voltage and Frequency Scaling in
- DMC and DRAM. It also supports changing timings of DRAM running with
- different frequency. The timings are calculated based on DT memory
- information.
+	  This adds the driver for the Samsung Exynos5422 SoC DMC (Dynamic
+	  Memory Controller). The driver provides support for Dynamic Voltage
+	  and Frequency Scaling in the DMC and DRAM. It also supports changing
+	  the DRAM timings when running at different frequencies. The timings
+	  are calculated based on DT memory information.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
config EXYNOS_SROM
bool "Exynos SROM controller driver" if COMPILE_TEST
@@ -29,6 +30,6 @@ config EXYNOS_SROM
during suspend. If however appropriate device tree configuration
is provided, the driver enables support for external memory
or external devices.
- If unsure, say Y on devices with Samsung Exynos SocS.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
endif
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index f9bae36c03a3..7951764b4efe 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -16,6 +16,7 @@ config TEGRA20_EMC
depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ
+ select DDR
help
This driver is for the External Memory Controller (EMC) found on
Tegra20 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 3c5aae7abf35..44b4a4080920 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -87,11 +87,9 @@ struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- err = devm_add_action(dev, tegra_mc_devm_action_put_device, mc);
- if (err) {
- put_device(mc->dev);
+ err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
+ if (err)
return ERR_PTR(err);
- }
return mc;
}
@@ -706,15 +704,6 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
goto remove_nodes;
}
- /*
- * MC driver is registered too early, so early that generic driver
- * syncing doesn't work for the MC. But it doesn't really matter
- * since syncing works for the EMC drivers, hence we can sync the
- * MC driver by ourselves and then EMC will complete syncing of
- * the whole ICC state.
- */
- icc_sync_state(mc->dev);
-
return 0;
remove_nodes:
@@ -835,6 +824,15 @@ static int __maybe_unused tegra_mc_resume(struct device *dev)
return 0;
}
+static void tegra_mc_sync_state(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ /* check whether ICC provider is registered */
+ if (mc->provider.dev == dev)
+ icc_sync_state(dev);
+}
+
static const struct dev_pm_ops tegra_mc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
};
@@ -845,6 +843,7 @@ static struct platform_driver tegra_mc_driver = {
.of_match_table = tegra_mc_of_match,
.pm = &tegra_mc_pm_ops,
.suppress_bind_attrs = true,
+ .sync_state = tegra_mc_sync_state,
},
.prevent_deferred_probe = true,
.probe = tegra_mc_probe,
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index d65e7c2a580b..746c4ef2c0af 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -197,6 +197,11 @@ static int tegra186_emc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
goto put_bpmp;
}
+ if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "EMC DVFS MRQ failed: %d (BPMP error code)\n", msg.rx.ret);
+ goto put_bpmp;
+ }
emc->debugfs.min_rate = ULONG_MAX;
emc->debugfs.max_rate = 0;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index c3462dbc8c22..497b6edbf3ca 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -5,6 +5,7 @@
* Author: Dmitry Osipenko <digetx@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
@@ -27,11 +28,15 @@
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
#include "mc.h"
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
+#define EMC_ADR_CFG_0 0x010
#define EMC_TIMING_CONTROL 0x028
#define EMC_RC 0x02c
#define EMC_RFC 0x030
@@ -68,6 +73,7 @@
#define EMC_QUSE_EXTRA 0x0ac
#define EMC_ODT_WRITE 0x0b0
#define EMC_ODT_READ 0x0b4
+#define EMC_MRR 0x0ec
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
#define EMC_STAT_CONTROL 0x160
@@ -94,6 +100,7 @@
#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
@@ -102,11 +109,25 @@
#define EMC_DBG_CFG_PRIORITY BIT(24)
#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+#define EMC_FBIO_CFG5_DRAM_TYPE GENMASK(1, 0)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_0_EMEM_NUMDEV GENMASK(25, 24)
#define EMC_PWR_GATHER_CLEAR (1 << 8)
#define EMC_PWR_GATHER_DISABLE (2 << 8)
#define EMC_PWR_GATHER_ENABLE (3 << 8)
+enum emc_dram_type {
+ DRAM_TYPE_RESERVED,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -201,6 +222,14 @@ struct tegra_emc {
struct mutex rate_lock;
struct devfreq_simple_ondemand_data ondemand_data;
+
+ /* memory chip identity information */
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+ bool mrr_error;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -397,15 +426,19 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
if (!emc->timings)
return -ENOMEM;
- emc->num_timings = child_count;
timing = emc->timings;
for_each_child_of_node(node, child) {
+ if (of_node_name_eq(child, "lpddr2"))
+ continue;
+
err = load_one_timing_from_dt(emc, timing++, child);
if (err) {
of_node_put(child);
return err;
}
+
+ emc->num_timings++;
}
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
@@ -422,12 +455,18 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
}
static struct device_node *
-tegra_emc_find_node_by_ram_code(struct device *dev)
+tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
{
+ struct device *dev = emc->dev;
struct device_node *np;
u32 value, ram_code;
int err;
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
if (of_get_child_count(dev->of_node) == 0) {
dev_info_once(dev, "device-tree doesn't have memory timings\n");
return NULL;
@@ -442,8 +481,49 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
np = of_find_node_by_name(np, "emc-tables")) {
err = of_property_read_u32(np, "nvidia,ram-code", &value);
if (err || value != ram_code) {
- of_node_put(np);
- continue;
+ struct device_node *lpddr2_np;
+ bool cfg_mismatches = false;
+
+ lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ if (lpddr2_np) {
+ const struct lpddr2_info *info;
+
+ info = of_lpddr2_get_info(lpddr2_np, dev);
+ if (info) {
+ if (info->manufacturer_id >= 0 &&
+ info->manufacturer_id != emc->manufacturer_id)
+ cfg_mismatches = true;
+
+ if (info->revision_id1 >= 0 &&
+ info->revision_id1 != emc->revision_id1)
+ cfg_mismatches = true;
+
+ if (info->revision_id2 >= 0 &&
+ info->revision_id2 != emc->revision_id2)
+ cfg_mismatches = true;
+
+ if (info->density != emc->basic_conf4.density)
+ cfg_mismatches = true;
+
+ if (info->io_width != emc->basic_conf4.io_width)
+ cfg_mismatches = true;
+
+ if (info->arch_type != emc->basic_conf4.arch_type)
+ cfg_mismatches = true;
+ } else {
+ dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
+ cfg_mismatches = true;
+ }
+
+ of_node_put(lpddr2_np);
+ } else {
+ cfg_mismatches = true;
+ }
+
+ if (cfg_mismatches) {
+ of_node_put(np);
+ continue;
+ }
}
return np;
@@ -455,10 +535,72 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
return NULL;
}
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
+ u32 memory_dev = emem_dev + 1;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
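
For example, reading MR8 from the first memory device (emem_dev = 0, hence memory_dev = 1) composes the following EMC_MRR value; the arithmetic is spelled out for reference:

    /*
     * FIELD_PREP(EMC_MRR_DEV_SELECTN, 1) = 1 << 30 = 0x40000000
     * FIELD_PREP(EMC_MRR_MRR_MA, 8)      = 8 << 16 = 0x00080000
     * value written to EMC_MRR                     = 0x40080000
     * The mode-register data then comes back in EMC_MRR[15:0] once
     * EMC_INTSTATUS reports EMC_MRR_DIVLD_INT.
     */
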
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ bool print_out)
+{
+ /* these registers are standard for all LPDDR JEDEC memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);
+
+ if (!print_out)
+ return;
+
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, emc->manufacturer_id,
+ lpddr2_jedec_manufacturer(emc->manufacturer_id),
+ emc->revision_id1, emc->revision_id2,
+ 4 >> emc->basic_conf4.arch_type,
+ 64 << emc->basic_conf4.density,
+ 32 >> emc->basic_conf4.io_width);
+}
+
static int emc_setup_hw(struct tegra_emc *emc)
{
+ u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
- u32 emc_cfg, emc_dbg, emc_fbio;
+ static bool print_sdram_info_once;
+ enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -496,7 +638,36 @@ static int emc_setup_hw(struct tegra_emc *emc)
else
emc->dram_bus_width = 32;
- dev_info_once(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+ dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);
+
+ switch (dram_type) {
+ case DRAM_TYPE_RESERVED:
+ dram_type_str = "INVALID";
+ break;
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
+ emc->dram_bus_width, emem_numdev, dram_type_str,
+ emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev,
+ !print_sdram_info_once);
+ print_sdram_info_once = true;
+ }
return 0;
}
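
A worked decode of the two registers read above, with illustrative raw values:

    /*
     * EMC_FBIO_CFG5 = 0x12: bits [1:0] = 2 -> DRAM_TYPE_LPDDR2,
     *                       bit 4 set     -> 16-bit DRAM bus.
     * EMC_ADR_CFG_0 = 0x01000000: bits [25:24] = 1 -> emem_numdev = 1 + 1 = 2,
     * which prints as "16bit DRAM bus, 2 LPDDR2 devices attached".
     */
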
@@ -1049,14 +1220,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
- np = tegra_emc_find_node_by_ram_code(&pdev->dev);
- if (np) {
- err = tegra_emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
- }
-
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
@@ -1065,6 +1228,14 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
+ np = tegra_emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
dev_name(&pdev->dev), emc);
if (err) {
@@ -1117,4 +1288,5 @@ module_platform_driver(tegra_emc_driver);
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
index 0ebfa8eccf0c..cc76adb8d7e8 100644
--- a/drivers/memory/tegra/tegra210-emc-cc-r21021.c
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -478,7 +478,7 @@ static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
{
u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
- u32 list[] = {
+ static const u32 list[] = {
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index 06c0f17fa429..13584f9317a4 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1662,7 +1662,7 @@ static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
tegra210_emc_debug_min_rate_get,
tegra210_emc_debug_min_rate_set, "%llu\n");
@@ -1692,7 +1692,7 @@ static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
tegra210_emc_debug_max_rate_get,
tegra210_emc_debug_max_rate_set, "%llu\n");
@@ -1723,7 +1723,7 @@ static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
tegra210_emc_debug_temperature_get,
tegra210_emc_debug_temperature_set, "%llu\n");
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 7e21a852f2e1..80f98d717e13 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1289,7 +1289,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
tegra_emc_debug_min_rate_get,
tegra_emc_debug_min_rate_set, "%llu\n");
@@ -1319,7 +1319,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
tegra_emc_debug_max_rate_get,
tegra_emc_debug_max_rate_set, "%llu\n");
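
The DEFINE_SIMPLE_ATTRIBUTE to DEFINE_DEBUGFS_ATTRIBUTE conversions above (three in tegra210-emc-core.c, two here in tegra30-emc.c) switch to attribute fops that carry their own debugfs removal protection and are meant to be registered with debugfs_create_file_unsafe(), avoiding the heavier proxy fops that plain debugfs_create_file() wraps around simple attributes; whether the corresponding create calls are switched to the _unsafe variant is outside these hunks. A minimal sketch of the pattern, using an invented "example_rate" attribute rather than any of the tegra fops:

#include <linux/debugfs.h>

static u64 example_rate;	/* invented value backing the debugfs file */

static int example_rate_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_rate_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

/* the generated fops protect themselves, so the _unsafe create is fine */
DEFINE_DEBUGFS_ATTRIBUTE(example_rate_fops, example_rate_get,
			 example_rate_set, "%llu\n");

static void example_debugfs_init(struct dentry *parent)
{
	debugfs_create_file_unsafe("example_rate", 0644, parent,
				   &example_rate, &example_rate_fops);
}
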
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 32d5ff8df747..07813fb1ef37 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -505,6 +505,7 @@ EXPORT_SYMBOL_GPL(of_platform_default_populate);
static const struct of_device_id reserved_mem_matches[] = {
{ .compatible = "qcom,rmtfs-mem" },
{ .compatible = "qcom,cmd-db" },
+ { .compatible = "qcom,smem" },
{ .compatible = "ramoops" },
{ .compatible = "nvmem-rmem" },
{}
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index b0056ae5d463..85024eb1d2ea 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -58,7 +58,7 @@ config RESET_BRCMSTB
a SUN_TOP_CTRL_SW_INIT style controller.
config RESET_BRCMSTB_RESCAL
- bool "Broadcom STB RESCAL reset controller"
+ tristate "Broadcom STB RESCAL reset controller"
depends on HAS_IOMEM
depends on ARCH_BRCMSTB || COMPILE_TEST
default ARCH_BRCMSTB
@@ -116,7 +116,7 @@ config RESET_LPC18XX
config RESET_MCHP_SPARX5
bool "Microchip Sparx5 reset driver"
- depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on ARCH_SPARX5 || SOC_LAN966 || COMPILE_TEST
default y if SPARX5_SWITCH
select MFD_SYSCON
help
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index f01e7db8e83b..00b612a0effa 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -13,15 +13,18 @@
#include <linux/regmap.h>
#include <linux/reset-controller.h>
-#define PROTECT_REG 0x84
-#define PROTECT_BIT BIT(10)
-#define SOFT_RESET_REG 0x00
-#define SOFT_RESET_BIT BIT(1)
+struct reset_props {
+ u32 protect_reg;
+ u32 protect_bit;
+ u32 reset_reg;
+ u32 reset_bit;
+};
struct mchp_reset_context {
struct regmap *cpu_ctrl;
struct regmap *gcb_ctrl;
struct reset_controller_dev rcdev;
+ const struct reset_props *props;
};
static struct regmap_config sparx5_reset_regmap_config = {
@@ -38,14 +41,16 @@ static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
u32 val;
/* Make sure the core is PROTECTED from reset */
- regmap_update_bits(ctx->cpu_ctrl, PROTECT_REG, PROTECT_BIT, PROTECT_BIT);
+ regmap_update_bits(ctx->cpu_ctrl, ctx->props->protect_reg,
+ ctx->props->protect_bit, ctx->props->protect_bit);
/* Start soft reset */
- regmap_write(ctx->gcb_ctrl, SOFT_RESET_REG, SOFT_RESET_BIT);
+ regmap_write(ctx->gcb_ctrl, ctx->props->reset_reg,
+ ctx->props->reset_bit);
/* Wait for soft reset done */
- return regmap_read_poll_timeout(ctx->gcb_ctrl, SOFT_RESET_REG, val,
- (val & SOFT_RESET_BIT) == 0,
+ return regmap_read_poll_timeout(ctx->gcb_ctrl, ctx->props->reset_reg, val,
+ (val & ctx->props->reset_bit) == 0,
1, 100);
}
@@ -115,13 +120,32 @@ static int mchp_sparx5_reset_probe(struct platform_device *pdev)
ctx->rcdev.nr_resets = 1;
ctx->rcdev.ops = &sparx5_reset_ops;
ctx->rcdev.of_node = dn;
+ ctx->props = device_get_match_data(&pdev->dev);
return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
}
+static const struct reset_props reset_props_sparx5 = {
+ .protect_reg = 0x84,
+ .protect_bit = BIT(10),
+ .reset_reg = 0x0,
+ .reset_bit = BIT(1),
+};
+
+static const struct reset_props reset_props_lan966x = {
+ .protect_reg = 0x88,
+ .protect_bit = BIT(5),
+ .reset_reg = 0x0,
+ .reset_bit = BIT(1),
+};
+
static const struct of_device_id mchp_sparx5_reset_of_match[] = {
{
.compatible = "microchip,sparx5-switch-reset",
+ .data = &reset_props_sparx5,
+ }, {
+ .compatible = "microchip,lan966x-switch-reset",
+ .data = &reset_props_lan966x,
},
{ }
};
diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
index 027990b79f61..908c1d5bc41e 100644
--- a/drivers/reset/reset-uniphier-glue.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -156,6 +156,10 @@ static const struct of_device_id uniphier_glue_reset_match[] = {
.data = &uniphier_pxs2_data,
},
{
+ .compatible = "socionext,uniphier-nx1-usb3-reset",
+ .data = &uniphier_pxs2_data,
+ },
+ {
.compatible = "socionext,uniphier-pro4-ahci-reset",
.data = &uniphier_pro4_data,
},
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 5f75783f9397..ff7580f38056 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -136,6 +136,21 @@ static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
UNIPHIER_RESETX(28, 0x200c, 7), /* SATA0 */
UNIPHIER_RESETX(29, 0x200c, 8), /* SATA1 */
UNIPHIER_RESETX(30, 0x200c, 21), /* SATA-PHY */
+ UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */
+ UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */
+ UNIPHIER_RESET_END,
+};
+
+static const struct uniphier_reset_data uniphier_nx1_sys_reset_data[] = {
+ UNIPHIER_RESETX(4, 0x2008, 8), /* eMMC */
+ UNIPHIER_RESETX(6, 0x200c, 0), /* Ether */
+ UNIPHIER_RESETX(12, 0x200c, 16), /* USB30 link */
+ UNIPHIER_RESETX(16, 0x200c, 24), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x200c, 25), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x200c, 26), /* USB30-PHY2 */
+ UNIPHIER_RESETX(24, 0x200c, 8), /* PCIe */
+ UNIPHIER_RESETX(52, 0x2010, 0), /* VOC */
+ UNIPHIER_RESETX(58, 0x2010, 8), /* HDMI-Tx */
UNIPHIER_RESET_END,
};
@@ -400,6 +415,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-pxs3-reset",
.data = uniphier_pxs3_sys_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-reset",
+ .data = uniphier_nx1_sys_reset_data,
+ },
/* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-ld4-mio-reset",
@@ -437,6 +456,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-pxs3-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
+ },
/* Peripheral reset */
{
.compatible = "socionext,uniphier-ld4-peri-reset",
@@ -470,6 +493,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-pxs3-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
/* Analog signal amplifiers reset */
{
.compatible = "socionext,uniphier-ld11-adamv-reset",
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e1bc5214494e..7208eeb8459a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1404,16 +1404,10 @@ config RTC_DRV_OMAP
This driver can also be built as a module, if so, module
will be called rtc-omap.
-config HAVE_S3C_RTC
- bool
- help
- This will include RTC support for Samsung SoCs. If
- you want to include RTC support for any machine, kindly
- select this in the respective mach-XXXX/Kconfig file.
-
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
- depends on ARCH_S3C64XX || HAVE_S3C_RTC || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S3C24XX || ARCH_S5PV210 || \
+ COMPILE_TEST
help
RTC (Realtime Clock) driver for the clock inbuilt into the
Samsung S3C24XX series of SoCs. This can provide periodic
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index d0329ad170d1..383b0cfc584e 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -168,7 +168,6 @@ EXPORT_SYMBOL_GPL(meson_canvas_free);
static int meson_canvas_probe(struct platform_device *pdev)
{
- struct resource *res;
struct meson_canvas *canvas;
struct device *dev = &pdev->dev;
@@ -176,8 +175,7 @@ static int meson_canvas_probe(struct platform_device *pdev)
if (!canvas)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- canvas->reg_base = devm_ioremap_resource(dev, res);
+ canvas->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(canvas->reg_base))
return PTR_ERR(canvas->reg_base);
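
The meson-canvas hunk above, and several later hunks in this pull (meson-clk-measure, the bcm63xx drivers, fsl guts and rcpm), replace the open-coded platform_get_resource() plus devm_ioremap_resource() pair with devm_platform_ioremap_resource(). The helper is a straight shorthand for the two calls; a sketch of the before/after shape, with a hypothetical priv structure standing in for each driver's private data:

/* before: fetch the MEM resource, then map it */
struct resource *res;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base))
	return PTR_ERR(priv->base);

/*
 * after: index 0 is looked up and mapped in one call; a missing
 * resource still comes back as an ERR_PTR, which is why the explicit
 * -ENODEV check dropped from rcpm.c further down is not needed either
 */
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
	return PTR_ERR(priv->base);
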
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 6dd190270123..3f3039600357 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -606,7 +606,6 @@ static int meson_msr_probe(struct platform_device *pdev)
{
const struct meson_msr_id *match_data;
struct meson_msr *priv;
- struct resource *res;
struct dentry *root, *clks;
void __iomem *base;
int i;
@@ -624,8 +623,7 @@ static int meson_msr_probe(struct platform_device *pdev)
memcpy(priv->msr_table, match_data, sizeof(priv->msr_table));
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 6f54bd832c8b..165f7548401b 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -65,6 +65,7 @@ static const struct meson_gx_package_id {
{ "A113X", 0x25, 0x37, 0xff },
{ "A113D", 0x25, 0x22, 0xff },
{ "S905D2", 0x28, 0x10, 0xf0 },
+ { "S905Y2", 0x28, 0x30, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
{ "A311D", 0x29, 0x10, 0xf0 },
{ "S922X", 0x29, 0x40, 0xf0 },
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index 243ca196e6ad..f579ee0b5afa 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -24,6 +24,16 @@ config ASPEED_LPC_SNOOP
allows the BMC to listen on and save the data written by
the host to an arbitrary LPC I/O port.
+config ASPEED_UART_ROUTING
+ tristate "ASPEED uart routing control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Provides a driver to control the UART routing paths, allowing
+ users to perform runtime configuration of the RX muxes among
+ the UART controllers and I/O pins.
+
config ASPEED_P2A_CTRL
tristate "ASPEED P2A (VGA MMIO to BMC) bridge control"
select REGMAP
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index fcab7192e1a4..b35d74592964 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
-obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
-obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
-obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
+obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o
+obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
new file mode 100644
index 000000000000..ef8b24fd1851
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-uart-routing.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2018 Google LLC
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+
+/* register offsets */
+#define HICR9 0x98
+#define HICRA 0x9c
+
+/* attributes options */
+#define UART_ROUTING_IO1 "io1"
+#define UART_ROUTING_IO2 "io2"
+#define UART_ROUTING_IO3 "io3"
+#define UART_ROUTING_IO4 "io4"
+#define UART_ROUTING_IO5 "io5"
+#define UART_ROUTING_IO6 "io6"
+#define UART_ROUTING_IO10 "io10"
+#define UART_ROUTING_UART1 "uart1"
+#define UART_ROUTING_UART2 "uart2"
+#define UART_ROUTING_UART3 "uart3"
+#define UART_ROUTING_UART4 "uart4"
+#define UART_ROUTING_UART5 "uart5"
+#define UART_ROUTING_UART6 "uart6"
+#define UART_ROUTING_UART10 "uart10"
+#define UART_ROUTING_RES "reserved"
+
+struct aspeed_uart_routing {
+ struct regmap *map;
+ struct attribute_group const *attr_grp;
+};
+
+struct aspeed_uart_routing_selector {
+ struct device_attribute dev_attr;
+ uint8_t reg;
+ uint8_t mask;
+ uint8_t shift;
+ const char *const options[];
+};
+
+#define to_routing_selector(_dev_attr) \
+ container_of(_dev_attr, struct aspeed_uart_routing_selector, dev_attr)
+
+static ssize_t aspeed_uart_routing_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#define ROUTING_ATTR(_name) { \
+ .attr = {.name = _name, \
+ .mode = VERIFY_OCTAL_PERMISSIONS(0644) }, \
+ .show = aspeed_uart_routing_show, \
+ .store = aspeed_uart_routing_store, \
+}
+
+/* routing selector for AST25xx */
+static struct aspeed_uart_routing_selector ast2500_io6_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO6),
+ .reg = HICR9,
+ .shift = 8,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO5,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart5_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART5),
+ .reg = HICRA,
+ .shift = 28,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_IO5,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART4),
+ .reg = HICRA,
+ .shift = 25,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
+ .reg = HICRA,
+ .shift = 22,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
+ .reg = HICRA,
+ .shift = 19,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
+ .reg = HICRA,
+ .shift = 16,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io5_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO5),
+ .reg = HICRA,
+ .shift = 12,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO4),
+ .reg = HICRA,
+ .shift = 9,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
+ .reg = HICRA,
+ .shift = 6,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
+ .reg = HICRA,
+ .shift = 3,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
+ .reg = HICRA,
+ .shift = 0,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct attribute *ast2500_uart_routing_attrs[] = {
+ &ast2500_io6_sel.dev_attr.attr,
+ &ast2500_uart5_sel.dev_attr.attr,
+ &ast2500_uart4_sel.dev_attr.attr,
+ &ast2500_uart3_sel.dev_attr.attr,
+ &ast2500_uart2_sel.dev_attr.attr,
+ &ast2500_uart1_sel.dev_attr.attr,
+ &ast2500_io5_sel.dev_attr.attr,
+ &ast2500_io4_sel.dev_attr.attr,
+ &ast2500_io3_sel.dev_attr.attr,
+ &ast2500_io2_sel.dev_attr.attr,
+ &ast2500_io1_sel.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ast2500_uart_routing_attr_group = {
+ .attrs = ast2500_uart_routing_attrs,
+};
+
+/* routing selector for AST26xx */
+static struct aspeed_uart_routing_selector ast2600_uart10_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART10),
+ .reg = HICR9,
+ .shift = 12,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_IO10,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_RES,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io10_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO10),
+ .reg = HICR9,
+ .shift = 8,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_RES,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_RES,
+ UART_ROUTING_UART10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART4),
+ .reg = HICRA,
+ .shift = 25,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
+ .reg = HICRA,
+ .shift = 22,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
+ .reg = HICRA,
+ .shift = 19,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
+ .reg = HICRA,
+ .shift = 16,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO4),
+ .reg = HICRA,
+ .shift = 9,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
+ .reg = HICRA,
+ .shift = 6,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
+ .reg = HICRA,
+ .shift = 3,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
+ .reg = HICRA,
+ .shift = 0,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct attribute *ast2600_uart_routing_attrs[] = {
+ &ast2600_uart10_sel.dev_attr.attr,
+ &ast2600_io10_sel.dev_attr.attr,
+ &ast2600_uart4_sel.dev_attr.attr,
+ &ast2600_uart3_sel.dev_attr.attr,
+ &ast2600_uart2_sel.dev_attr.attr,
+ &ast2600_uart1_sel.dev_attr.attr,
+ &ast2600_io4_sel.dev_attr.attr,
+ &ast2600_io3_sel.dev_attr.attr,
+ &ast2600_io2_sel.dev_attr.attr,
+ &ast2600_io1_sel.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ast2600_uart_routing_attr_group = {
+ .attrs = ast2600_uart_routing_attrs,
+};
+
+static ssize_t aspeed_uart_routing_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val, pos, len;
+
+ regmap_read(uart_routing->map, sel->reg, &val);
+ val = (val >> sel->shift) & sel->mask;
+
+ len = 0;
+ for (pos = 0; sel->options[pos] != NULL; ++pos) {
+ if (pos == val)
+ len += sysfs_emit_at(buf, len, "[%s] ", sel->options[pos]);
+ else
+ len += sysfs_emit_at(buf, len, "%s ", sel->options[pos]);
+ }
+
+ if (val >= pos)
+ len += sysfs_emit_at(buf, len, "[unknown(%d)]", val);
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val;
+
+	val = __sysfs_match_string(sel->options, -1, buf);

+ if (val < 0) {
+ dev_err(dev, "invalid value \"%s\"\n", buf);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(uart_routing->map, sel->reg,
+ (sel->mask << sel->shift),
+ (val & sel->mask) << sel->shift);
+
+ return count;
+}
+
+static int aspeed_uart_routing_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct device *dev = &pdev->dev;
+ struct aspeed_uart_routing *uart_routing;
+
+ uart_routing = devm_kzalloc(&pdev->dev, sizeof(*uart_routing), GFP_KERNEL);
+ if (!uart_routing)
+ return -ENOMEM;
+
+ uart_routing->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(uart_routing->map)) {
+ dev_err(dev, "cannot get regmap\n");
+ return PTR_ERR(uart_routing->map);
+ }
+
+ uart_routing->attr_grp = of_device_get_match_data(dev);
+
+ rc = sysfs_create_group(&dev->kobj, uart_routing->attr_grp);
+ if (rc < 0)
+ return rc;
+
+ dev_set_drvdata(dev, uart_routing);
+
+ dev_info(dev, "module loaded\n");
+
+ return 0;
+}
+
+static int aspeed_uart_routing_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct aspeed_uart_routing *uart_routing = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&dev->kobj, uart_routing->attr_grp);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_uart_routing_table[] = {
+ { .compatible = "aspeed,ast2400-uart-routing",
+ .data = &ast2500_uart_routing_attr_group },
+ { .compatible = "aspeed,ast2500-uart-routing",
+ .data = &ast2500_uart_routing_attr_group },
+ { .compatible = "aspeed,ast2600-uart-routing",
+ .data = &ast2600_uart_routing_attr_group },
+ { },
+};
+
+static struct platform_driver aspeed_uart_routing_driver = {
+ .driver = {
+ .name = "aspeed-uart-routing",
+ .of_match_table = aspeed_uart_routing_table,
+ },
+ .probe = aspeed_uart_routing_probe,
+ .remove = aspeed_uart_routing_remove,
+};
+
+module_platform_driver(aspeed_uart_routing_driver);
+
+MODULE_AUTHOR("Oskar Senft <osk@google.com>");
+MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver to configure Aspeed UART routing");
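
Each sysfs attribute created by this driver is backed by one selector: a (reg, shift, mask) field in HICR9/HICRA plus an ordered option list, where the field value is simply an index into that list. Writes match the supplied name against the options and update the field with regmap_update_bits(); reads print the list with the currently selected option in brackets. A standalone userspace mock of the show-side formatting, using the ast2600 uart1 selector's option order and an invented register value:

#include <stdio.h>

static const char *const uart1_options[] = {
	"io1", "io2", "io3", "io4", "uart2", "uart3", "uart4", "io10", NULL,
};

int main(void)
{
	unsigned int hicra = 0x00050000;	/* invented register content */
	unsigned int shift = 16, mask = 0x7;	/* ast2600 uart1 field */
	unsigned int val = (hicra >> shift) & mask;
	unsigned int pos;

	/* mirrors aspeed_uart_routing_show(): bracket the active option */
	for (pos = 0; uart1_options[pos]; pos++)
		printf(pos == val ? "[%s] " : "%s ", uart1_options[pos]);
	printf("\n");	/* io1 io2 io3 io4 uart2 [uart3] uart4 io10 */
	return 0;
}
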
diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
index 774465c119be..7bbe46ea5f94 100644
--- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -276,7 +276,6 @@ static int bcm_pmb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct bcm_pmb_pd_data *table;
const struct bcm_pmb_pd_data *e;
- struct resource *res;
struct bcm_pmb *pmb;
int max_id;
int err;
@@ -287,8 +286,7 @@ static int bcm_pmb_probe(struct platform_device *pdev)
pmb->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmb->base = devm_ioremap_resource(&pdev->dev, res);
+ pmb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmb->base))
return PTR_ERR(pmb->base);
diff --git a/drivers/soc/bcm/bcm63xx/bcm63xx-power.c b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
index 515fe182dc34..aa72e13d5d0e 100644
--- a/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
+++ b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
@@ -91,7 +91,6 @@ static int bcm63xx_power_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res;
const struct bcm63xx_power_data *entry, *table;
struct bcm63xx_power *power;
unsigned int ndom;
@@ -102,8 +101,7 @@ static int bcm63xx_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- power->base = devm_ioremap_resource(&pdev->dev, res);
+ power->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(power->base))
return PTR_ERR(power->base);
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 7f8dc302ae6e..2c975d79fe8e 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -136,6 +136,8 @@ static int __init mcp_write_pairing_set(void)
static const u32 a72_b53_mach_compat[] = {
0x7211,
+ 0x72113,
+ 0x72116,
0x7216,
0x72164,
0x72165,
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 3fd0d0840287..db0d3910fee4 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -500,7 +500,7 @@ int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_fq(&ed, fqid);
- return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 3474bf5f88d5..e46bcf78ed59 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -693,9 +693,9 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@ -775,9 +775,9 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index d5e9a5f2c087..072473a16f4d 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -140,7 +140,6 @@ static int fsl_guts_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct resource *res;
const struct fsl_soc_die_attr *soc_die;
const char *machine;
u32 svr;
@@ -152,8 +151,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
guts->little_endian = of_property_read_bool(np, "little-endian");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- guts->regs = devm_ioremap_resource(dev, res);
+ guts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(guts->regs))
return PTR_ERR(guts->regs);
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index 90d3f4060b0c..3d0cae30c769 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -146,7 +146,6 @@ static const struct dev_pm_ops rcpm_pm_ops = {
static int rcpm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *r;
struct rcpm *rcpm;
int ret;
@@ -154,11 +153,7 @@ static int rcpm_probe(struct platform_device *pdev)
if (!rcpm)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- rcpm->ippdexpcr_base = devm_ioremap_resource(&pdev->dev, r);
+ rcpm->ippdexpcr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcpm->ippdexpcr_base)) {
ret = PTR_ERR(rcpm->ippdexpcr_base);
return ret;
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 05812f8ae734..a840494e849a 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -6,6 +6,7 @@ config IMX_GPCV2_PM_DOMAINS
depends on ARCH_MXC || (COMPILE_TEST && OF)
depends on PM
select PM_GENERIC_DOMAINS
+ select REGMAP_MMIO
default y if SOC_IMX7D
config SOC_IMX8M
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 078dc918f4f3..8a707077914c 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -5,3 +5,4 @@ endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
+obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 34a9ac1f2b9b..b8d52d8d29db 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -192,7 +192,7 @@ struct imx_pgc_domain {
struct clk_bulk_data *clks;
int num_clks;
- unsigned int pgc;
+ unsigned long pgc;
const struct {
u32 pxx;
@@ -202,6 +202,7 @@ struct imx_pgc_domain {
} bits;
const int voltage;
+ const bool keep_clocks;
struct device *dev;
};
@@ -220,7 +221,7 @@ to_imx_pgc_domain(struct generic_pm_domain *genpd)
static int imx_pgc_power_up(struct generic_pm_domain *genpd)
{
struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
- u32 reg_val;
+ u32 reg_val, pgc;
int ret;
ret = pm_runtime_get_sync(domain->dev);
@@ -244,6 +245,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
goto out_regulator_disable;
}
+ reset_control_assert(domain->reset);
+
if (domain->bits.pxx) {
/* request the domain to power up */
regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PUP_REQ,
@@ -262,12 +265,12 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
/* disable power control */
- regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR);
+ for_each_set_bit(pgc, &domain->pgc, 32) {
+ regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(pgc),
+ GPC_PGC_CTRL_PCR);
+ }
}
- reset_control_assert(domain->reset);
-
/* delay for reset to propagate */
udelay(5);
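
The loop above, and the matching one added to imx_pgc_power_down() below, iterate domain->pgc as a bitmask now that one domain can own several PGC instances (see the imx8mm GPU domain further down, which sets BIT(IMX8MM_PGC_GPU2D) | BIT(IMX8MM_PGC_GPU3D)). Conceptually the for_each_set_bit() walk behaves like this hand-rolled sketch (illustrative only, not the real macro):

	unsigned long pgc = BIT(IMX8MM_PGC_GPU2D) | BIT(IMX8MM_PGC_GPU3D);
	unsigned int bit;

	for (bit = 0; bit < 32; bit++) {
		if (!(pgc & BIT(bit)))
			continue;
		/* one GPC_PGC_CTRL(bit) update per selected PGC instance */
		regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(bit),
				  GPC_PGC_CTRL_PCR);
	}
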
@@ -293,7 +296,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
/* Disable reset clocks for all devices in the domain */
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+ if (!domain->keep_clocks)
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return 0;
@@ -311,14 +315,16 @@ out_put_pm:
static int imx_pgc_power_down(struct generic_pm_domain *genpd)
{
struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
- u32 reg_val;
+ u32 reg_val, pgc;
int ret;
/* Enable reset clocks for all devices in the domain */
- ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
- if (ret) {
- dev_err(domain->dev, "failed to enable reset clocks\n");
- return ret;
+ if (!domain->keep_clocks) {
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable reset clocks\n");
+ return ret;
+ }
}
/* request the ADB400 to power down */
@@ -338,8 +344,10 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
if (domain->bits.pxx) {
/* enable power control */
- regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ for_each_set_bit(pgc, &domain->pgc, 32) {
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(pgc),
+ GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ }
/* request the domain to power down */
regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PDN_REQ,
@@ -389,7 +397,7 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = {
.map = IMX7_MIPI_PHY_A_CORE_DOMAIN,
},
.voltage = 1000000,
- .pgc = IMX7_PGC_MIPI,
+ .pgc = BIT(IMX7_PGC_MIPI),
},
[IMX7_POWER_DOMAIN_PCIE_PHY] = {
@@ -401,7 +409,7 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = {
.map = IMX7_PCIE_PHY_A_CORE_DOMAIN,
},
.voltage = 1000000,
- .pgc = IMX7_PGC_PCIE,
+ .pgc = BIT(IMX7_PGC_PCIE),
},
[IMX7_POWER_DOMAIN_USB_HSIC_PHY] = {
@@ -413,7 +421,7 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = {
.map = IMX7_USB_HSIC_PHY_A_CORE_DOMAIN,
},
.voltage = 1200000,
- .pgc = IMX7_PGC_USB_HSIC,
+ .pgc = BIT(IMX7_PGC_USB_HSIC),
},
};
@@ -448,7 +456,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_MIPI_SW_Pxx_REQ,
.map = IMX8M_MIPI_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_MIPI,
+ .pgc = BIT(IMX8M_PGC_MIPI),
},
[IMX8M_POWER_DOMAIN_PCIE1] = {
@@ -459,7 +467,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_PCIE1_SW_Pxx_REQ,
.map = IMX8M_PCIE1_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_PCIE1,
+ .pgc = BIT(IMX8M_PGC_PCIE1),
},
[IMX8M_POWER_DOMAIN_USB_OTG1] = {
@@ -470,7 +478,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_OTG1_SW_Pxx_REQ,
.map = IMX8M_OTG1_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_OTG1,
+ .pgc = BIT(IMX8M_PGC_OTG1),
},
[IMX8M_POWER_DOMAIN_USB_OTG2] = {
@@ -481,7 +489,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_OTG2_SW_Pxx_REQ,
.map = IMX8M_OTG2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_OTG2,
+ .pgc = BIT(IMX8M_PGC_OTG2),
},
[IMX8M_POWER_DOMAIN_DDR1] = {
@@ -492,7 +500,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_DDR1_SW_Pxx_REQ,
.map = IMX8M_DDR2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_DDR1,
+ .pgc = BIT(IMX8M_PGC_DDR1),
},
[IMX8M_POWER_DOMAIN_GPU] = {
@@ -505,7 +513,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.hskreq = IMX8M_GPU_HSK_PWRDNREQN,
.hskack = IMX8M_GPU_HSK_PWRDNACKN,
},
- .pgc = IMX8M_PGC_GPU,
+ .pgc = BIT(IMX8M_PGC_GPU),
},
[IMX8M_POWER_DOMAIN_VPU] = {
@@ -518,7 +526,8 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.hskreq = IMX8M_VPU_HSK_PWRDNREQN,
.hskack = IMX8M_VPU_HSK_PWRDNACKN,
},
- .pgc = IMX8M_PGC_VPU,
+ .pgc = BIT(IMX8M_PGC_VPU),
+ .keep_clocks = true,
},
[IMX8M_POWER_DOMAIN_DISP] = {
@@ -531,7 +540,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.hskreq = IMX8M_DISP_HSK_PWRDNREQN,
.hskack = IMX8M_DISP_HSK_PWRDNACKN,
},
- .pgc = IMX8M_PGC_DISP,
+ .pgc = BIT(IMX8M_PGC_DISP),
},
[IMX8M_POWER_DOMAIN_MIPI_CSI1] = {
@@ -542,7 +551,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_MIPI_CSI1_SW_Pxx_REQ,
.map = IMX8M_MIPI_CSI1_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_MIPI_CSI1,
+ .pgc = BIT(IMX8M_PGC_MIPI_CSI1),
},
[IMX8M_POWER_DOMAIN_MIPI_CSI2] = {
@@ -553,7 +562,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_MIPI_CSI2_SW_Pxx_REQ,
.map = IMX8M_MIPI_CSI2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_MIPI_CSI2,
+ .pgc = BIT(IMX8M_PGC_MIPI_CSI2),
},
[IMX8M_POWER_DOMAIN_PCIE2] = {
@@ -564,7 +573,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_PCIE2_SW_Pxx_REQ,
.map = IMX8M_PCIE2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_PCIE2,
+ .pgc = BIT(IMX8M_PGC_PCIE2),
},
};
@@ -617,6 +626,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_HSIO_HSK_PWRDNREQN,
.hskack = IMX8MM_HSIO_HSK_PWRDNACKN,
},
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_PCIE] = {
@@ -627,7 +637,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_PCIE_SW_Pxx_REQ,
.map = IMX8MM_PCIE_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_PCIE,
+ .pgc = BIT(IMX8MM_PGC_PCIE),
},
[IMX8MM_POWER_DOMAIN_OTG1] = {
@@ -638,7 +648,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_OTG1_SW_Pxx_REQ,
.map = IMX8MM_OTG1_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_OTG1,
+ .pgc = BIT(IMX8MM_PGC_OTG1),
},
[IMX8MM_POWER_DOMAIN_OTG2] = {
@@ -649,7 +659,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_OTG2_SW_Pxx_REQ,
.map = IMX8MM_OTG2_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_OTG2,
+ .pgc = BIT(IMX8MM_PGC_OTG2),
},
[IMX8MM_POWER_DOMAIN_GPUMIX] = {
@@ -662,7 +672,8 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_GPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_GPUMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_GPUMIX,
+ .pgc = BIT(IMX8MM_PGC_GPUMIX),
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_GPU] = {
@@ -675,7 +686,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_GPU_HSK_PWRDNREQN,
.hskack = IMX8MM_GPU_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_GPU2D,
+ .pgc = BIT(IMX8MM_PGC_GPU2D) | BIT(IMX8MM_PGC_GPU3D),
},
[IMX8MM_POWER_DOMAIN_VPUMIX] = {
@@ -688,7 +699,8 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_VPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_VPUMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_VPUMIX,
+ .pgc = BIT(IMX8MM_PGC_VPUMIX),
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_VPUG1] = {
@@ -699,7 +711,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_VPUG1_SW_Pxx_REQ,
.map = IMX8MM_VPUG1_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_VPUG1,
+ .pgc = BIT(IMX8MM_PGC_VPUG1),
},
[IMX8MM_POWER_DOMAIN_VPUG2] = {
@@ -710,7 +722,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_VPUG2_SW_Pxx_REQ,
.map = IMX8MM_VPUG2_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_VPUG2,
+ .pgc = BIT(IMX8MM_PGC_VPUG2),
},
[IMX8MM_POWER_DOMAIN_VPUH1] = {
@@ -721,7 +733,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_VPUH1_SW_Pxx_REQ,
.map = IMX8MM_VPUH1_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_VPUH1,
+ .pgc = BIT(IMX8MM_PGC_VPUH1),
},
[IMX8MM_POWER_DOMAIN_DISPMIX] = {
@@ -734,7 +746,8 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_DISPMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_DISPMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_DISPMIX,
+ .pgc = BIT(IMX8MM_PGC_DISPMIX),
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_MIPI] = {
@@ -745,7 +758,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_MIPI_SW_Pxx_REQ,
.map = IMX8MM_MIPI_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_MIPI,
+ .pgc = BIT(IMX8MM_PGC_MIPI),
},
};
@@ -802,6 +815,7 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
.hskreq = IMX8MN_HSIO_HSK_PWRDNREQN,
.hskack = IMX8MN_HSIO_HSK_PWRDNACKN,
},
+ .keep_clocks = true,
},
[IMX8MN_POWER_DOMAIN_OTG1] = {
@@ -812,7 +826,7 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
.pxx = IMX8MN_OTG1_SW_Pxx_REQ,
.map = IMX8MN_OTG1_A53_DOMAIN,
},
- .pgc = IMX8MN_PGC_OTG1,
+ .pgc = BIT(IMX8MN_PGC_OTG1),
},
[IMX8MN_POWER_DOMAIN_GPUMIX] = {
@@ -825,7 +839,7 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
.hskreq = IMX8MN_GPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MN_PGC_GPUMIX,
+ .pgc = BIT(IMX8MN_PGC_GPUMIX),
},
};
@@ -894,6 +908,10 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
goto out_domain_unmap;
}
+ if (IS_ENABLED(CONFIG_LOCKDEP) &&
+ of_property_read_bool(domain->dev->of_node, "power-domains"))
+ lockdep_set_subclass(&domain->genpd.mlock, 1);
+
ret = of_genpd_add_provider_simple(domain->dev->of_node,
&domain->genpd);
if (ret) {
@@ -930,6 +948,36 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int imx_pgc_domain_suspend(struct device *dev)
+{
+ int ret;
+
+ /*
+ * This may look strange, but is done so the generic PM_SLEEP code
+ * can power down our domain and more importantly power it up again
+ * after resume, without tripping over our usage of runtime PM to
+ * power up/down the nested domains.
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_pgc_domain_resume(struct device *dev)
+{
+ return pm_runtime_put(dev);
+}
+#endif
+
+static const struct dev_pm_ops imx_pgc_domain_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx_pgc_domain_suspend, imx_pgc_domain_resume)
+};
+
static const struct platform_device_id imx_pgc_domain_id[] = {
{ "imx-pgc-domain", },
{ },
@@ -938,6 +986,7 @@ static const struct platform_device_id imx_pgc_domain_id[] = {
static struct platform_driver imx_pgc_domain_driver = {
.driver = {
.name = "imx-pgc",
+ .pm = &imx_pgc_domain_pm_ops,
},
.probe = imx_pgc_domain_probe,
.remove = imx_pgc_domain_remove,
@@ -986,6 +1035,9 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
struct imx_pgc_domain *domain;
u32 domain_index;
+ if (!of_device_is_available(np))
+ continue;
+
ret = of_property_read_u32(np, "reg", &domain_index);
if (ret) {
dev_err(dev, "Failed to read 'reg' property\n");
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
new file mode 100644
index 000000000000..519b3651d1d9
--- /dev/null
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2021 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/power/imx8mm-power.h>
+
+#define BLK_SFT_RSTN 0x0
+#define BLK_CLK_EN 0x4
+
+struct imx8m_blk_ctrl_domain;
+
+struct imx8m_blk_ctrl {
+ struct device *dev;
+ struct notifier_block power_nb;
+ struct device *bus_power_dev;
+ struct regmap *regmap;
+ struct imx8m_blk_ctrl_domain *domains;
+ struct genpd_onecell_data onecell_data;
+};
+
+struct imx8m_blk_ctrl_domain_data {
+ const char *name;
+ const char * const *clk_names;
+ int num_clks;
+ const char *gpc_name;
+ u32 rst_mask;
+ u32 clk_mask;
+};
+
+#define DOMAIN_MAX_CLKS 3
+
+struct imx8m_blk_ctrl_domain {
+ struct generic_pm_domain genpd;
+ const struct imx8m_blk_ctrl_domain_data *data;
+ struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct device *power_dev;
+ struct imx8m_blk_ctrl *bc;
+};
+
+struct imx8m_blk_ctrl_data {
+ int max_reg;
+ notifier_fn_t power_notifier_fn;
+ const struct imx8m_blk_ctrl_domain_data *domains;
+ int num_domains;
+};
+
+static inline struct imx8m_blk_ctrl_domain *
+to_imx8m_blk_ctrl_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx8m_blk_ctrl_domain, genpd);
+}
+
+static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx8m_blk_ctrl_domain *domain = to_imx8m_blk_ctrl_domain(genpd);
+ const struct imx8m_blk_ctrl_domain_data *data = domain->data;
+ struct imx8m_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ /* make sure bus domain is awake */
+ ret = pm_runtime_get_sync(bc->bus_power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->bus_power_dev);
+ dev_err(bc->dev, "failed to power up bus domain\n");
+ return ret;
+ }
+
+ /* put devices into reset */
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+
+ /* enable upstream and blk-ctrl clocks to allow reset to propagate */
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable clocks\n");
+ goto bus_put;
+ }
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ /* power up upstream GPC domain */
+ ret = pm_runtime_get_sync(domain->power_dev);
+ if (ret < 0) {
+ dev_err(bc->dev, "failed to power up peripheral domain\n");
+ goto clk_disable;
+ }
+
+ /* wait for reset to propagate */
+ udelay(5);
+
+ /* release reset */
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+
+ /* disable upstream clocks */
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ return 0;
+
+clk_disable:
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+bus_put:
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx8m_blk_ctrl_domain *domain = to_imx8m_blk_ctrl_domain(genpd);
+ const struct imx8m_blk_ctrl_domain_data *data = domain->data;
+ struct imx8m_blk_ctrl *bc = domain->bc;
+
+ /* put devices into reset and disable clocks */
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ /* power down upstream GPC domain */
+ pm_runtime_put(domain->power_dev);
+
+ /* allow bus domain to suspend */
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+
+static struct generic_pm_domain *
+imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
+{
+ struct genpd_onecell_data *onecell_data = data;
+ unsigned int index = args->args[0];
+
+ if (args->args_count != 1 ||
+ index >= onecell_data->num_domains)
+ return ERR_PTR(-EINVAL);
+
+ return onecell_data->domains[index];
+}
+
+static struct lock_class_key blk_ctrl_genpd_lock_class;
+
+static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
+{
+ const struct imx8m_blk_ctrl_data *bc_data;
+ struct device *dev = &pdev->dev;
+ struct imx8m_blk_ctrl *bc;
+ void __iomem *base;
+ int i, ret;
+
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+
+ bc->dev = dev;
+
+ bc_data = of_device_get_match_data(dev);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_config.max_register = bc_data->max_reg;
+ bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(bc->regmap))
+ return dev_err_probe(dev, PTR_ERR(bc->regmap),
+ "failed to init regmap\n");
+
+ bc->domains = devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct imx8m_blk_ctrl_domain),
+ GFP_KERNEL);
+ if (!bc->domains)
+ return -ENOMEM;
+
+ bc->onecell_data.num_domains = bc_data->num_domains;
+ bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
+ bc->onecell_data.domains =
+ devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!bc->onecell_data.domains)
+ return -ENOMEM;
+
+ bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus");
+ if (IS_ERR(bc->bus_power_dev))
+ return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
+ "failed to attach power domain\n");
+
+ for (i = 0; i < bc_data->num_domains; i++) {
+ const struct imx8m_blk_ctrl_domain_data *data = &bc_data->domains[i];
+ struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
+ int j;
+
+ domain->data = data;
+
+ for (j = 0; j < data->num_clks; j++)
+ domain->clks[j].id = data->clk_names[j];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clock\n");
+ goto cleanup_pds;
+ }
+
+ domain->power_dev =
+ dev_pm_domain_attach_by_name(dev, data->gpc_name);
+ if (IS_ERR(domain->power_dev)) {
+ dev_err_probe(dev, PTR_ERR(domain->power_dev),
+ "failed to attach power domain\n");
+ ret = PTR_ERR(domain->power_dev);
+ goto cleanup_pds;
+ }
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx8m_blk_ctrl_power_on;
+ domain->genpd.power_off = imx8m_blk_ctrl_power_off;
+ domain->bc = bc;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init power domain\n");
+ dev_pm_domain_detach(domain->power_dev, true);
+ goto cleanup_pds;
+ }
+
+ /*
+ * We use runtime PM to trigger power on/off of the upstream GPC
+ * domain, as a strict hierarchical parent/child power domain
+ * setup doesn't allow us to meet the sequencing requirements.
+ * This means we have nested locking of genpd locks, without the
+ * nesting being visible at the genpd level, so we need a
+ * separate lock class to make lockdep aware of the fact that
+		 * these are separate domain locks that can be nested without a
+ * self-deadlock.
+ */
+ lockdep_set_class(&domain->genpd.mlock,
+ &blk_ctrl_genpd_lock_class);
+
+ bc->onecell_data.domains[i] = &domain->genpd;
+ }
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power domain provider\n");
+ goto cleanup_pds;
+ }
+
+ bc->power_nb.notifier_call = bc_data->power_notifier_fn;
+ ret = dev_pm_genpd_add_notifier(bc->bus_power_dev, &bc->power_nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power notifier\n");
+ goto cleanup_provider;
+ }
+
+ dev_set_drvdata(dev, bc);
+
+ return 0;
+
+cleanup_provider:
+ of_genpd_del_provider(dev->of_node);
+cleanup_pds:
+ for (i--; i >= 0; i--) {
+ pm_genpd_remove(&bc->domains[i].genpd);
+ dev_pm_domain_detach(bc->domains[i].power_dev, true);
+ }
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return ret;
+}
+
+static int imx8m_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct imx8m_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ of_genpd_del_provider(pdev->dev.of_node);
+
+	for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
+
+ pm_genpd_remove(&domain->genpd);
+ dev_pm_domain_detach(domain->power_dev, true);
+ }
+
+ dev_pm_genpd_remove_notifier(bc->bus_power_dev);
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx8m_blk_ctrl_suspend(struct device *dev)
+{
+ struct imx8m_blk_ctrl *bc = dev_get_drvdata(dev);
+ int ret, i;
+
+ /*
+ * This may look strange, but is done so the generic PM_SLEEP code
+ * can power down our domains and more importantly power them up again
+ * after resume, without tripping over our usage of runtime PM to
+ * control the upstream GPC domains. Things happen in the right order
+ * in the system suspend/resume paths due to the device parent/child
+ * hierarchy.
+ */
+ ret = pm_runtime_get_sync(bc->bus_power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->bus_power_dev);
+ return ret;
+ }
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
+
+ ret = pm_runtime_get_sync(domain->power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(domain->power_dev);
+ goto out_fail;
+ }
+ }
+
+ return 0;
+
+out_fail:
+ for (i--; i >= 0; i--)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8m_blk_ctrl_resume(struct device *dev)
+{
+ struct imx8m_blk_ctrl *bc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx8m_blk_ctrl_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx8m_blk_ctrl_suspend, imx8m_blk_ctrl_resume)
+};
+
+static int imx8mm_vpu_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /*
+ * The ADB in the VPUMIX domain has no separate reset and clock
+ * enable bits, but is ungated together with the VPU clocks. To
+ * allow the handshake with the GPC to progress we put the VPUs
+ * in reset and ungate the clocks.
+ */
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, BIT(0) | BIT(1) | BIT(2));
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(0) | BIT(1) | BIT(2));
+
+ if (action == GENPD_NOTIFY_ON) {
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ udelay(5);
+
+ /* set "fuse" bits to enable the VPUs */
+ regmap_set_bits(bc->regmap, 0x8, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0xc, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0x10, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0x14, 0xffffffff);
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8m_blk_ctrl_domain_data imx8mm_vpu_blk_ctl_domain_data[] = {
+ [IMX8MM_VPUBLK_PD_G1] = {
+ .name = "vpublk-g1",
+ .clk_names = (const char *[]){ "g1", },
+ .num_clks = 1,
+ .gpc_name = "g1",
+ .rst_mask = BIT(1),
+ .clk_mask = BIT(1),
+ },
+ [IMX8MM_VPUBLK_PD_G2] = {
+ .name = "vpublk-g2",
+ .clk_names = (const char *[]){ "g2", },
+ .num_clks = 1,
+ .gpc_name = "g2",
+ .rst_mask = BIT(0),
+ .clk_mask = BIT(0),
+ },
+ [IMX8MM_VPUBLK_PD_H1] = {
+ .name = "vpublk-h1",
+ .clk_names = (const char *[]){ "h1", },
+ .num_clks = 1,
+ .gpc_name = "h1",
+ .rst_mask = BIT(2),
+ .clk_mask = BIT(2),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mm_vpu_blk_ctl_dev_data = {
+ .max_reg = 0x18,
+ .power_notifier_fn = imx8mm_vpu_power_notifier,
+ .domains = imx8mm_vpu_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mm_vpu_blk_ctl_domain_data),
+};
+
+static int imx8mm_disp_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /* Enable bus clock and deassert bus reset */
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(12));
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(6));
+
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ if (action == GENPD_NOTIFY_ON)
+ udelay(5);
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[] = {
+ [IMX8MM_DISPBLK_PD_CSI_BRIDGE] = {
+ .name = "dispblk-csi-bridge",
+ .clk_names = (const char *[]){ "csi-bridge-axi", "csi-bridge-apb",
+ "csi-bridge-core", },
+ .num_clks = 3,
+ .gpc_name = "csi-bridge",
+ .rst_mask = BIT(0) | BIT(1) | BIT(2),
+ .clk_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5),
+ },
+ [IMX8MM_DISPBLK_PD_LCDIF] = {
+ .name = "dispblk-lcdif",
+ .clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", },
+ .num_clks = 3,
+ .gpc_name = "lcdif",
+ .clk_mask = BIT(6) | BIT(7),
+ },
+ [IMX8MM_DISPBLK_PD_MIPI_DSI] = {
+ .name = "dispblk-mipi-dsi",
+ .clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", },
+ .num_clks = 2,
+ .gpc_name = "mipi-dsi",
+ .rst_mask = BIT(5),
+ .clk_mask = BIT(8) | BIT(9),
+ },
+ [IMX8MM_DISPBLK_PD_MIPI_CSI] = {
+ .name = "dispblk-mipi-csi",
+ .clk_names = (const char *[]){ "csi-aclk", "csi-pclk" },
+ .num_clks = 2,
+ .gpc_name = "mipi-csi",
+ .rst_mask = BIT(3) | BIT(4),
+ .clk_mask = BIT(10) | BIT(11),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mm_disp_blk_ctl_dev_data = {
+ .max_reg = 0x2c,
+ .power_notifier_fn = imx8mm_disp_power_notifier,
+ .domains = imx8mm_disp_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mm_disp_blk_ctl_domain_data),
+};
+
+static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
+ {
+ .compatible = "fsl,imx8mm-vpu-blk-ctrl",
+ .data = &imx8mm_vpu_blk_ctl_dev_data
+ }, {
+ .compatible = "fsl,imx8mm-disp-blk-ctrl",
+ .data = &imx8mm_disp_blk_ctl_dev_data
+	}, {
+ /* Sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx8m_blk_ctrl_of_match);
+
+static struct platform_driver imx8m_blk_ctrl_driver = {
+ .probe = imx8m_blk_ctrl_probe,
+ .remove = imx8m_blk_ctrl_remove,
+ .driver = {
+ .name = "imx8m-blk-ctrl",
+ .pm = &imx8m_blk_ctrl_pm_ops,
+ .of_match_table = imx8m_blk_ctrl_of_match,
+ },
+};
+module_platform_driver(imx8m_blk_ctrl_driver);
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
new file mode 100644
index 000000000000..6f0a57044a7b
--- /dev/null
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8192_MMSYS_H
+#define __SOC_MEDIATEK_MT8192_MMSYS_H
+
+#define MT8192_MMSYS_OVL_MOUT_EN 0xf04
+#define MT8192_DISP_OVL1_2L_MOUT_EN 0xf08
+#define MT8192_DISP_OVL0_2L_MOUT_EN 0xf18
+#define MT8192_DISP_OVL0_MOUT_EN 0xf1c
+#define MT8192_DISP_RDMA0_SEL_IN 0xf2c
+#define MT8192_DISP_RDMA0_SOUT_SEL 0xf30
+#define MT8192_DISP_CCORR0_SOUT_SEL 0xf34
+#define MT8192_DISP_AAL0_SEL_IN 0xf38
+#define MT8192_DISP_DITHER0_MOUT_EN 0xf3c
+#define MT8192_DISP_DSI0_SEL_IN 0xf40
+#define MT8192_DISP_OVL2_2L_MOUT_EN 0xf4c
+
+#define MT8192_DISP_OVL0_GO_BLEND BIT(0)
+#define MT8192_DITHER0_MOUT_IN_DSI0 BIT(0)
+#define MT8192_OVL0_MOUT_EN_DISP_RDMA0 BIT(0)
+#define MT8192_OVL2_2L_MOUT_EN_RDMA4 BIT(0)
+#define MT8192_DISP_OVL0_GO_BG BIT(1)
+#define MT8192_DISP_OVL0_2L_GO_BLEND BIT(2)
+#define MT8192_DISP_OVL0_2L_GO_BG BIT(3)
+#define MT8192_OVL1_2L_MOUT_EN_RDMA1 BIT(4)
+#define MT8192_OVL0_MOUT_EN_OVL0_2L BIT(4)
+#define MT8192_RDMA0_SEL_IN_OVL0_2L 0x3
+#define MT8192_RDMA0_SOUT_COLOR0 0x1
+#define MT8192_CCORR0_SOUT_AAL0 0x1
+#define MT8192_AAL0_SEL_IN_CCORR0 0x1
+#define MT8192_DSI0_SEL_IN_DITHER0 0x1
+
+static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
+ MT8192_OVL0_MOUT_EN_DISP_RDMA0
+ }, {
+ DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
+ MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
+ MT8192_OVL2_2L_MOUT_EN_RDMA4
+ }, {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
+ MT8192_DITHER0_MOUT_IN_DSI0
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
+ MT8192_RDMA0_SEL_IN_OVL0_2L
+ }, {
+ DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
+ MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
+ MT8192_AAL0_SEL_IN_CCORR0
+ }, {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
+ MT8192_RDMA0_SOUT_COLOR0
+ }, {
+ DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
+ MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
+ MT8192_CCORR0_SOUT_AAL0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
+ MT8192_DISP_OVL0_GO_BG
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
+ MT8192_DISP_OVL0_2L_GO_BLEND
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index a78e88f27b62..1e448f1ffefb 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -4,15 +4,18 @@
* Author: James Liao <jamesjj.liao@mediatek.com>
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include "mtk-mmsys.h"
#include "mt8167-mmsys.h"
#include "mt8183-mmsys.h"
+#include "mt8192-mmsys.h"
#include "mt8365-mmsys.h"
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
@@ -53,6 +56,12 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table),
};
+static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+ .clk_driver = "clk-mt8192-mm",
+ .routes = mmsys_mt8192_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8192_routing_table),
+};
+
static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
.clk_driver = "clk-mt8365-mm",
.routes = mt8365_mmsys_routing_table,
@@ -62,6 +71,8 @@ static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
struct mtk_mmsys {
void __iomem *regs;
const struct mtk_mmsys_driver_data *data;
+ spinlock_t lock; /* protects mmsys_sw_rst_b reg */
+ struct reset_controller_dev rcdev;
};
void mtk_mmsys_ddp_connect(struct device *dev,
@@ -101,6 +112,58 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
+static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned long id,
+ bool assert)
+{
+ struct mtk_mmsys *mmsys = container_of(rcdev, struct mtk_mmsys, rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mmsys->lock, flags);
+
+ reg = readl_relaxed(mmsys->regs + MMSYS_SW0_RST_B);
+
+ if (assert)
+ reg &= ~BIT(id);
+ else
+ reg |= BIT(id);
+
+ writel_relaxed(reg, mmsys->regs + MMSYS_SW0_RST_B);
+
+ spin_unlock_irqrestore(&mmsys->lock, flags);
+
+ return 0;
+}
+
+static int mtk_mmsys_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return mtk_mmsys_reset_update(rcdev, id, true);
+}
+
+static int mtk_mmsys_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return mtk_mmsys_reset_update(rcdev, id, false);
+}
+
+static int mtk_mmsys_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ int ret;
+
+ ret = mtk_mmsys_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1100);
+
+ return mtk_mmsys_reset_deassert(rcdev, id);
+}
+
+static const struct reset_control_ops mtk_mmsys_reset_ops = {
+ .assert = mtk_mmsys_reset_assert,
+ .deassert = mtk_mmsys_reset_deassert,
+ .reset = mtk_mmsys_reset,
+};
+
static int mtk_mmsys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -120,6 +183,18 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
return ret;
}
+ spin_lock_init(&mmsys->lock);
+
+ mmsys->rcdev.owner = THIS_MODULE;
+ mmsys->rcdev.nr_resets = 32;
+ mmsys->rcdev.ops = &mtk_mmsys_reset_ops;
+ mmsys->rcdev.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &mmsys->rcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register mmsys reset controller: %d\n", ret);
+ return ret;
+ }
+
mmsys->data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, mmsys);
@@ -168,6 +243,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
.data = &mt8183_mmsys_driver_data,
},
{
+ .compatible = "mediatek,mt8192-mmsys",
+ .data = &mt8192_mmsys_driver_data,
+ },
+ {
.compatible = "mediatek,mt8365-mmsys",
.data = &mt8365_mmsys_driver_data,
},
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index 9e2b81bd38db..8b0ed05117ea 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -78,6 +78,8 @@
#define DSI_SEL_IN_RDMA 0x1
#define DSI_SEL_IN_MASK 0x1
+#define MMSYS_SW0_RST_B 0x140
+
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 2e4bcc300576..2ca55bb5a8be 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -39,6 +39,18 @@
#define MT8167_MUTEX_MOD_DISP_DITHER 15
#define MT8167_MUTEX_MOD_DISP_UFOE 16
+#define MT8192_MUTEX_MOD_DISP_OVL0 0
+#define MT8192_MUTEX_MOD_DISP_OVL0_2L 1
+#define MT8192_MUTEX_MOD_DISP_RDMA0 2
+#define MT8192_MUTEX_MOD_DISP_COLOR0 4
+#define MT8192_MUTEX_MOD_DISP_CCORR0 5
+#define MT8192_MUTEX_MOD_DISP_AAL0 6
+#define MT8192_MUTEX_MOD_DISP_GAMMA0 7
+#define MT8192_MUTEX_MOD_DISP_POSTMASK0 8
+#define MT8192_MUTEX_MOD_DISP_DITHER0 9
+#define MT8192_MUTEX_MOD_DISP_OVL2_2L 16
+#define MT8192_MUTEX_MOD_DISP_RDMA4 17
+
#define MT8183_MUTEX_MOD_DISP_RDMA0 0
#define MT8183_MUTEX_MOD_DISP_RDMA1 1
#define MT8183_MUTEX_MOD_DISP_OVL0 9
@@ -214,6 +226,20 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
};
+static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_COLOR0] = MT8192_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER] = MT8192_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_GAMMA] = MT8192_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_POSTMASK0] = MT8192_MUTEX_MOD_DISP_POSTMASK0,
+ [DDP_COMPONENT_OVL0] = MT8192_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8192_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_OVL_2L2] = MT8192_MUTEX_MOD_DISP_OVL2_2L,
+ [DDP_COMPONENT_RDMA0] = MT8192_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA4] = MT8192_MUTEX_MOD_DISP_RDMA4,
+};
+
static const unsigned int mt2712_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
@@ -275,6 +301,13 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.no_clk = true,
};
+static const struct mtk_mutex_data mt8192_mutex_driver_data = {
+ .mutex_mod = mt8192_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev)
{
struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
@@ -507,6 +540,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8173_mutex_driver_data},
{ .compatible = "mediatek,mt8183-disp-mutex",
.data = &mt8183_mutex_driver_data},
+ { .compatible = "mediatek,mt8192-disp-mutex",
+ .data = &mt8192_mutex_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index bfa2ab5772cf..e718b8735444 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -190,6 +190,25 @@ config QCOM_SOCINFO
Say yes here to support the Qualcomm socinfo driver, providing
information about the SoC to user space.
+config QCOM_SPM
+ tristate "Qualcomm Subsystem Power Manager (SPM)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_SCM
+ help
+	  Enable support for the Qualcomm Subsystem Power Manager, used to
+	  manage cores and L2 low power modes and to configure the internal
+	  Adaptive Voltage Scaler parameters, where supported.
+
+config QCOM_STATS
+ tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver"
+ depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST
+ depends on QCOM_SMEM
+ help
+	  Say yes here to enable the Qualcomm Technologies, Inc. (QTI) sleep
+	  stats driver, which reads SoC level low power mode statistics from
+	  the shared memory exported by the remote processor and exposes them
+	  through a debugfs interface.
+
config QCOM_WCNSS_CTRL
tristate "Qualcomm WCNSS control driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ad675a6593d0..70d5de69fd7b 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
+obj-$(CONFIG_QCOM_SPM) += spm.o
+obj-$(CONFIG_QCOM_STATS) += qcom_stats.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 8a9bfbcd4bb9..82ca12c9328a 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -492,12 +492,14 @@ static int of_apr_add_pd_lookups(struct device *dev)
1, &service_path);
if (ret < 0) {
dev_err(dev, "pdr service path missing: %d\n", ret);
+ of_node_put(node);
return ret;
}
pds = pdr_add_lookup(apr->pdr, service_name, service_path);
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
+ of_node_put(node);
return PTR_ERR(pds);
}
}
diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
index 4ce8e816154f..1d818a8ba208 100644
--- a/drivers/soc/qcom/cpr.c
+++ b/drivers/soc/qcom/cpr.c
@@ -1614,7 +1614,6 @@ static void cpr_debugfs_init(struct cpr_drv *drv)
static int cpr_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct cpr_drv *drv;
int irq, ret;
@@ -1648,8 +1647,7 @@ static int cpr_probe(struct platform_device *pdev)
if (IS_ERR(drv->tcsr))
return PTR_ERR(drv->tcsr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drv->base = devm_ioremap_resource(dev, res);
+ drv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drv->base))
return PTR_ERR(drv->base);
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 15a36dcab990..6bf2f1d1f2c5 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -115,7 +115,7 @@ static const struct llcc_slice_config sc7280_data[] = {
{ LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
+ { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0},
{ LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
@@ -142,6 +142,16 @@ static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
};
+static const struct llcc_slice_config sm6350_data[] = {
+ { LLCC_CPUSS, 1, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
static const struct llcc_slice_config sm8150_data[] = {
{ LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
{ LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
@@ -203,6 +213,11 @@ static const struct qcom_llcc_config sdm845_cfg = {
.need_llcc_cfg = false,
};
+static const struct qcom_llcc_config sm6350_cfg = {
+ .sct_data = sm6350_data,
+ .size = ARRAY_SIZE(sm6350_data),
+};
+
static const struct qcom_llcc_config sm8150_cfg = {
.sct_data = sm8150_data,
.size = ARRAY_SIZE(sm8150_data),
@@ -626,6 +641,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
{ }
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index f1875dc31ae2..d2dacbbaafbd 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -300,7 +300,6 @@ static int ocmem_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
unsigned long reg, region_size;
int i, j, ret, num_banks;
- struct resource *res;
struct ocmem *ocmem;
if (!qcom_scm_is_available())
@@ -321,8 +320,7 @@ static int ocmem_dev_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
- ocmem->mmio = devm_ioremap_resource(&pdev->dev, res);
+ ocmem->mmio = devm_platform_ioremap_resource_byname(pdev, "ctrl");
if (IS_ERR(ocmem->mmio)) {
dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n");
return PTR_ERR(ocmem->mmio);
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 915d5bc3d46e..fc580a3c4336 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -131,7 +131,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
return ret;
req.enable = enable;
- strcpy(req.service_path, pds->service_path);
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
&txn, SERVREG_REGISTER_LISTENER_REQ,
@@ -257,7 +257,7 @@ static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
return ret;
req.transaction_id = tid;
- strcpy(req.service_path, pds->service_path);
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
&txn, SERVREG_SET_ACK_REQ,
@@ -406,7 +406,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
return -ENOMEM;
/* Prepare req message */
- strcpy(req.service_name, pds->service_name);
+ strscpy(req.service_name, pds->service_name, sizeof(req.service_name));
req.domain_offset_valid = true;
req.domain_offset = 0;
@@ -531,8 +531,8 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
return ERR_PTR(-ENOMEM);
pds->service = SERVREG_NOTIFIER_SERVICE;
- strcpy(pds->service_name, service_name);
- strcpy(pds->service_path, service_path);
+ strscpy(pds->service_name, service_name, sizeof(pds->service_name));
+ strscpy(pds->service_path, service_path, sizeof(pds->service_path));
pds->need_locator_lookup = true;
mutex_lock(&pdr->list_lock);
@@ -587,7 +587,7 @@ int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
break;
/* Prepare req message */
- strcpy(req.service_path, pds->service_path);
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
addr = pds->addr;
break;
}
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 7d649d2cf31e..28a8c0dda66c 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -871,7 +871,6 @@ EXPORT_SYMBOL(geni_icc_disable);
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct geni_wrapper *wrapper;
int ret;
@@ -880,8 +879,7 @@ static int geni_se_probe(struct platform_device *pdev)
return -ENOMEM;
wrapper->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wrapper->base = devm_ioremap_resource(dev, res);
+ wrapper->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wrapper->base))
return PTR_ERR(wrapper->base);
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 536c3e4114fb..34acf58bbb0d 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -2,16 +2,16 @@
/*
* Copyright (c) 2019, Linaro Ltd
*/
-#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
#include <linux/thermal.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#define QMP_DESC_MAGIC 0x0
#define QMP_DESC_VERSION 0x4
@@ -64,7 +64,6 @@ struct qmp_cooling_device {
* @event: wait_queue for synchronization with the IRQ
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
- * @pd_data: genpd data
* @cooling_devs: thermal cooling devices
*/
struct qmp {
@@ -82,17 +81,9 @@ struct qmp {
struct mutex tx_lock;
struct clk_hw qdss_clk;
- struct genpd_onecell_data pd_data;
struct qmp_cooling_device *cooling_devs;
};
-struct qmp_pd {
- struct qmp *qmp;
- struct generic_pm_domain pd;
-};
-
-#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)
-
static void qmp_kick(struct qmp *qmp)
{
mbox_send_message(qmp->mbox_chan, NULL);
@@ -223,11 +214,14 @@ static bool qmp_message_empty(struct qmp *qmp)
*
* Return: 0 on success, negative errno on failure
*/
-static int qmp_send(struct qmp *qmp, const void *data, size_t len)
+int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
int ret;
+ if (WARN_ON(IS_ERR_OR_NULL(qmp) || !data))
+ return -EINVAL;
+
if (WARN_ON(len + sizeof(u32) > qmp->size))
return -EINVAL;
@@ -261,6 +255,7 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
return ret;
}
+EXPORT_SYMBOL(qmp_send);
static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
@@ -314,95 +309,6 @@ static void qmp_qdss_clk_remove(struct qmp *qmp)
clk_hw_unregister(&qmp->qdss_clk);
}
-static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
-{
- char buf[QMP_MSG_LEN] = {};
-
- snprintf(buf, sizeof(buf),
- "{class: image, res: load_state, name: %s, val: %s}",
- res->pd.name, enable ? "on" : "off");
- return qmp_send(res->qmp, buf, sizeof(buf));
-}
-
-static int qmp_pd_power_on(struct generic_pm_domain *domain)
-{
- return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true);
-}
-
-static int qmp_pd_power_off(struct generic_pm_domain *domain)
-{
- return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false);
-}
-
-static const char * const sdm845_resources[] = {
- [AOSS_QMP_LS_CDSP] = "cdsp",
- [AOSS_QMP_LS_LPASS] = "adsp",
- [AOSS_QMP_LS_MODEM] = "modem",
- [AOSS_QMP_LS_SLPI] = "slpi",
- [AOSS_QMP_LS_SPSS] = "spss",
- [AOSS_QMP_LS_VENUS] = "venus",
-};
-
-static int qmp_pd_add(struct qmp *qmp)
-{
- struct genpd_onecell_data *data = &qmp->pd_data;
- struct device *dev = qmp->dev;
- struct qmp_pd *res;
- size_t num = ARRAY_SIZE(sdm845_resources);
- int ret;
- int i;
-
- res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
- GFP_KERNEL);
- if (!data->domains)
- return -ENOMEM;
-
- for (i = 0; i < num; i++) {
- res[i].qmp = qmp;
- res[i].pd.name = sdm845_resources[i];
- res[i].pd.power_on = qmp_pd_power_on;
- res[i].pd.power_off = qmp_pd_power_off;
-
- ret = pm_genpd_init(&res[i].pd, NULL, true);
- if (ret < 0) {
- dev_err(dev, "failed to init genpd\n");
- goto unroll_genpds;
- }
-
- data->domains[i] = &res[i].pd;
- }
-
- data->num_domains = i;
-
- ret = of_genpd_add_provider_onecell(dev->of_node, data);
- if (ret < 0)
- goto unroll_genpds;
-
- return 0;
-
-unroll_genpds:
- for (i--; i >= 0; i--)
- pm_genpd_remove(data->domains[i]);
-
- return ret;
-}
-
-static void qmp_pd_remove(struct qmp *qmp)
-{
- struct genpd_onecell_data *data = &qmp->pd_data;
- struct device *dev = qmp->dev;
- int i;
-
- of_genpd_del_provider(dev->of_node);
-
- for (i = 0; i < data->num_domains; i++)
- pm_genpd_remove(data->domains[i]);
-}
-
static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
@@ -519,9 +425,53 @@ static void qmp_cooling_devices_remove(struct qmp *qmp)
thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}
+/**
+ * qmp_get() - get a qmp handle from a device
+ * @dev: client device pointer
+ *
+ * Return: handle to qmp device on success, ERR_PTR() on failure
+ */
+struct qmp *qmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct qmp *qmp;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-EINVAL);
+
+ qmp = platform_get_drvdata(pdev);
+
+ return qmp ? qmp : ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qmp_get);
+
+/**
+ * qmp_put() - release a qmp handle
+ * @qmp: qmp handle obtained from qmp_get()
+ */
+void qmp_put(struct qmp *qmp)
+{
+ /*
+ * Match get_device() inside of_find_device_by_node() in
+ * qmp_get()
+ */
+ if (!IS_ERR_OR_NULL(qmp))
+ put_device(qmp->dev);
+}
+EXPORT_SYMBOL(qmp_put);
+
static int qmp_probe(struct platform_device *pdev)
{
- struct resource *res;
struct qmp *qmp;
int irq;
int ret;
@@ -534,8 +484,7 @@ static int qmp_probe(struct platform_device *pdev)
init_waitqueue_head(&qmp->event);
mutex_init(&qmp->tx_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- qmp->msgram = devm_ioremap_resource(&pdev->dev, res);
+ qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qmp->msgram))
return PTR_ERR(qmp->msgram);
@@ -563,10 +512,6 @@ static int qmp_probe(struct platform_device *pdev)
if (ret)
goto err_close_qmp;
- ret = qmp_pd_add(qmp);
- if (ret)
- goto err_remove_qdss_clk;
-
ret = qmp_cooling_devices_register(qmp);
if (ret)
dev_err(&pdev->dev, "failed to register aoss cooling devices\n");
@@ -575,8 +520,6 @@ static int qmp_probe(struct platform_device *pdev)
return 0;
-err_remove_qdss_clk:
- qmp_qdss_clk_remove(qmp);
err_close_qmp:
qmp_close(qmp);
err_free_mbox:
@@ -590,7 +533,6 @@ static int qmp_remove(struct platform_device *pdev)
struct qmp *qmp = platform_get_drvdata(pdev);
qmp_qdss_clk_remove(qmp);
- qmp_pd_remove(qmp);
qmp_cooling_devices_remove(qmp);
qmp_close(qmp);
@@ -615,6 +557,7 @@ static struct platform_driver qmp_driver = {
.driver = {
.name = "qcom_aoss_qmp",
.of_match_table = qmp_dt_match,
+ .suppress_bind_attrs = true,
},
.probe = qmp_probe,
.remove = qmp_remove,
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 304afc223a58..290bdefbf28a 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -127,7 +127,6 @@ static int gsbi_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device_node *tcsr_node;
const struct of_device_id *match;
- struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
int i, ret;
@@ -139,8 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
if (!gsbi)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
new file mode 100644
index 000000000000..131d24caabf8
--- /dev/null
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#include <linux/soc/qcom/smem.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define RPM_DYNAMIC_ADDR 0x14
+#define RPM_DYNAMIC_ADDR_MASK 0xFFFF
+
+#define STAT_TYPE_OFFSET 0x0
+#define COUNT_OFFSET 0x4
+#define LAST_ENTERED_AT_OFFSET 0x8
+#define LAST_EXITED_AT_OFFSET 0x10
+#define ACCUMULATED_OFFSET 0x18
+#define CLIENT_VOTES_OFFSET 0x20
+
+struct subsystem_data {
+ const char *name;
+ u32 smem_item;
+ u32 pid;
+};
+
+static const struct subsystem_data subsystems[] = {
+ { "modem", 605, 1 },
+ { "wpss", 605, 13 },
+ { "adsp", 606, 2 },
+ { "cdsp", 607, 5 },
+ { "slpi", 608, 3 },
+ { "gpu", 609, 0 },
+ { "display", 610, 0 },
+ { "adsp_island", 613, 2 },
+ { "slpi_island", 613, 3 },
+};
+
+struct stats_config {
+ size_t stats_offset;
+ size_t num_records;
+ bool appended_stats_avail;
+ bool dynamic_offset;
+ bool subsystem_stats_in_smem;
+};
+
+struct stats_data {
+ bool appended_stats_avail;
+ void __iomem *base;
+};
+
+struct sleep_stats {
+ u32 stat_type;
+ u32 count;
+ u64 last_entered_at;
+ u64 last_exited_at;
+ u64 accumulated;
+};
+
+struct appended_stats {
+ u32 client_votes;
+ u32 reserved[3];
+};
+
+static void qcom_print_stats(struct seq_file *s, const struct sleep_stats *stat)
+{
+ u64 accumulated = stat->accumulated;
+ /*
+	 * If a subsystem is in sleep when reading the sleep stats, adjust
+	 * the accumulated sleep duration to show the actual sleep time.
+ */
+ if (stat->last_entered_at > stat->last_exited_at)
+ accumulated += arch_timer_read_counter() - stat->last_entered_at;
+
+ seq_printf(s, "Count: %u\n", stat->count);
+ seq_printf(s, "Last Entered At: %llu\n", stat->last_entered_at);
+ seq_printf(s, "Last Exited At: %llu\n", stat->last_exited_at);
+ seq_printf(s, "Accumulated Duration: %llu\n", accumulated);
+}
+
+static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct subsystem_data *subsystem = s->private;
+ struct sleep_stats *stat;
+
+	/* Items are allocated lazily, so look up the pointer each time */
+ stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL);
+ if (IS_ERR(stat))
+ return -EIO;
+
+ qcom_print_stats(s, stat);
+
+ return 0;
+}
+
+static int qcom_soc_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct stats_data *d = s->private;
+ void __iomem *reg = d->base;
+ struct sleep_stats stat;
+
+ memcpy_fromio(&stat, reg, sizeof(stat));
+ qcom_print_stats(s, &stat);
+
+ if (d->appended_stats_avail) {
+ struct appended_stats votes;
+
+ memcpy_fromio(&votes, reg + CLIENT_VOTES_OFFSET, sizeof(votes));
+ seq_printf(s, "Client Votes: %#x\n", votes.client_votes);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qcom_soc_sleep_stats);
+DEFINE_SHOW_ATTRIBUTE(qcom_subsystem_sleep_stats);
+
+static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *reg,
+ struct stats_data *d,
+ const struct stats_config *config)
+{
+ char stat_type[sizeof(u32) + 1] = {0};
+ size_t stats_offset = config->stats_offset;
+ u32 offset = 0, type;
+ int i, j;
+
+ /*
+	 * On RPM targets, the stats offset location is dynamic and changes from
+	 * target to target, and sometimes from build to build for the same target.
+	 *
+	 * In such cases the dynamic address is present at offset 0x14 from the
+	 * base address given in the devicetree. The lower 16 bits hold the
+	 * stats_offset.
+ */
+ if (config->dynamic_offset) {
+ stats_offset = readl(reg + RPM_DYNAMIC_ADDR);
+ stats_offset &= RPM_DYNAMIC_ADDR_MASK;
+ }
+
+ for (i = 0; i < config->num_records; i++) {
+ d[i].base = reg + offset + stats_offset;
+
+ /*
+ * Read the low power mode name and create debugfs file for it.
+		 * The names that may be read are listed below
+		 * (they can vary depending on the low power modes supported).
+ * For rpmh-sleep-stats: "aosd", "cxsd" and "ddr".
+ * For rpm-sleep-stats: "vmin" and "vlow".
+ */
+ type = readl(d[i].base);
+ for (j = 0; j < sizeof(u32); j++) {
+ stat_type[j] = type & 0xff;
+ type = type >> 8;
+ }
+ strim(stat_type);
+ debugfs_create_file(stat_type, 0400, root, &d[i],
+ &qcom_soc_sleep_stats_fops);
+
+ offset += sizeof(struct sleep_stats);
+ if (d[i].appended_stats_avail)
+ offset += sizeof(struct appended_stats);
+ }
+}
+
+static void qcom_create_subsystem_stat_files(struct dentry *root,
+ const struct stats_config *config)
+{
+ const struct sleep_stats *stat;
+ int i;
+
+ if (!config->subsystem_stats_in_smem)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(subsystems); i++) {
+ stat = qcom_smem_get(subsystems[i].pid, subsystems[i].smem_item, NULL);
+ if (IS_ERR(stat))
+ continue;
+
+ debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i],
+ &qcom_subsystem_sleep_stats_fops);
+ }
+}
+
+static int qcom_stats_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ struct dentry *root;
+ const struct stats_config *config;
+ struct stats_data *d;
+ int i;
+
+ config = device_get_match_data(&pdev->dev);
+ if (!config)
+ return -ENODEV;
+
+ reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(reg))
+ return -ENOMEM;
+
+ d = devm_kcalloc(&pdev->dev, config->num_records,
+ sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_records; i++)
+ d[i].appended_stats_avail = config->appended_stats_avail;
+
+ root = debugfs_create_dir("qcom_stats", NULL);
+
+ qcom_create_subsystem_stat_files(root, config);
+ qcom_create_soc_sleep_stat_files(root, reg, d, config);
+
+ platform_set_drvdata(pdev, root);
+
+ return 0;
+}
+
+static int qcom_stats_remove(struct platform_device *pdev)
+{
+ struct dentry *root = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(root);
+
+ return 0;
+}
+
+static const struct stats_config rpm_data = {
+ .stats_offset = 0,
+ .num_records = 2,
+ .appended_stats_avail = true,
+ .dynamic_offset = true,
+ .subsystem_stats_in_smem = false,
+};
+
+static const struct stats_config rpmh_data = {
+ .stats_offset = 0x48,
+ .num_records = 3,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
+static const struct of_device_id qcom_stats_table[] = {
+ { .compatible = "qcom,rpm-stats", .data = &rpm_data },
+ { .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_stats_table);
+
+static struct platform_driver qcom_stats = {
+ .probe = qcom_stats_probe,
+ .remove = qcom_stats_remove,
+ .driver = {
+ .name = "qcom_stats",
+ .of_match_table = qcom_stats_table,
+ },
+};
+
+static int __init qcom_stats_init(void)
+{
+ return platform_driver_register(&qcom_stats);
+}
+late_initcall(qcom_stats_init);
+
+static void __exit qcom_stats_exit(void)
+{
+ platform_driver_unregister(&qcom_stats);
+}
+module_exit(qcom_stats_exit)
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. (QTI) Stats driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index e749a2b285d8..3a12a482f6b2 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -910,7 +910,6 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct rsc_drv *drv;
- struct resource *res;
char drv_id[10] = {0};
int ret, irq;
u32 solver_config;
@@ -941,8 +940,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
drv->name = dev_name(&pdev->dev);
snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource_byname(pdev, drv_id);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index fa209b479ab3..1118345d8824 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -30,6 +30,7 @@
* @active_only: True if it represents an Active only peer
* @corner: current corner
* @active_corner: current active corner
+ * @enable_corner: lowest non-zero corner
* @level: An array of level (vlvl) to corner (hlvl) mappings
* derived from cmd-db
* @level_count: Number of levels supported by the power domain. max
@@ -47,6 +48,7 @@ struct rpmhpd {
const bool active_only;
unsigned int corner;
unsigned int active_corner;
+ unsigned int enable_corner;
u32 level[RPMH_ARC_MAX_LEVELS];
size_t level_count;
bool enabled;
@@ -147,6 +149,21 @@ static const struct rpmhpd_desc sdx55_desc = {
.num_pds = ARRAY_SIZE(sdx55_rpmhpds),
};
+/* SM6350 RPMH powerdomains */
+static struct rpmhpd *sm6350_rpmhpds[] = {
+ [SM6350_CX] = &sdm845_cx,
+ [SM6350_GFX] = &sdm845_gfx,
+ [SM6350_LCX] = &sdm845_lcx,
+ [SM6350_LMX] = &sdm845_lmx,
+ [SM6350_MSS] = &sdm845_mss,
+ [SM6350_MX] = &sdm845_mx,
+};
+
+static const struct rpmhpd_desc sm6350_desc = {
+ .rpmhpds = sm6350_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm6350_rpmhpds),
+};
+
/* SM8150 RPMH powerdomains */
static struct rpmhpd sm8150_mmcx_ao;
@@ -204,7 +221,7 @@ static const struct rpmhpd_desc sm8250_desc = {
static struct rpmhpd sm8350_mxc_ao;
static struct rpmhpd sm8350_mxc = {
.pd = { .name = "mxc", },
- .peer = &sm8150_mmcx_ao,
+ .peer = &sm8350_mxc_ao,
.res_name = "mxc.lvl",
};
@@ -297,6 +314,7 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
+ { .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc },
@@ -385,13 +403,13 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
static int rpmhpd_power_on(struct generic_pm_domain *domain)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
- int ret = 0;
+ unsigned int corner;
+ int ret;
mutex_lock(&rpmhpd_lock);
- if (pd->corner)
- ret = rpmhpd_aggregate_corner(pd, pd->corner);
-
+ corner = max(pd->corner, pd->enable_corner);
+ ret = rpmhpd_aggregate_corner(pd, corner);
if (!ret)
pd->enabled = true;
@@ -436,6 +454,10 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
i--;
if (pd->enabled) {
+		/* Ensure that the domain isn't turned off */
+ if (i < pd->enable_corner)
+ i = pd->enable_corner;
+
ret = rpmhpd_aggregate_corner(pd, i);
if (ret)
goto out;
@@ -472,6 +494,10 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
for (i = 0; i < rpmhpd->level_count; i++) {
rpmhpd->level[i] = buf[i];
+ /* Remember the first corner with non-zero level */
+ if (!rpmhpd->level[rpmhpd->enable_corner] && rpmhpd->level[i])
+ rpmhpd->enable_corner = i;
+
/*
* The AUX data may be zero padded. These 0 valued entries at
* the end of the map must be ignored.
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index dbf494e92574..4f69fb9b2e0e 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -185,6 +185,29 @@ static const struct rpmpd_desc msm8916_desc = {
.max_state = MAX_CORNER_RPMPD_STATE,
};
+/* msm8953 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8953, vddmd, vddmd_ao, SMPA, LEVEL, 1);
+DEFINE_RPMPD_PAIR(msm8953, vddcx, vddcx_ao, SMPA, LEVEL, 2);
+DEFINE_RPMPD_PAIR(msm8953, vddmx, vddmx_ao, SMPA, LEVEL, 7);
+
+DEFINE_RPMPD_VFL(msm8953, vddcx_vfl, SMPA, 2);
+
+static struct rpmpd *msm8953_rpmpds[] = {
+ [MSM8953_VDDMD] = &msm8953_vddmd,
+ [MSM8953_VDDMD_AO] = &msm8953_vddmd_ao,
+ [MSM8953_VDDCX] = &msm8953_vddcx,
+ [MSM8953_VDDCX_AO] = &msm8953_vddcx_ao,
+ [MSM8953_VDDCX_VFL] = &msm8953_vddcx_vfl,
+ [MSM8953_VDDMX] = &msm8953_vddmx,
+ [MSM8953_VDDMX_AO] = &msm8953_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8953_desc = {
+ .rpmpds = msm8953_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8953_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
/* msm8976 RPM Power Domains */
DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
@@ -377,6 +400,7 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
+ { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index dfdd4f20f5fd..30dda1af63c8 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -236,6 +236,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8226" },
{ .compatible = "qcom,rpm-msm8916" },
{ .compatible = "qcom,rpm-msm8936" },
+ { .compatible = "qcom,rpm-msm8953" },
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8976" },
{ .compatible = "qcom,rpm-msm8994" },
@@ -244,6 +245,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-sdm660" },
{ .compatible = "qcom,rpm-sm6115" },
{ .compatible = "qcom,rpm-sm6125" },
+ { .compatible = "qcom,rpm-qcm2290" },
{ .compatible = "qcom,rpm-qcs404" },
{}
};
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 4fb5aeeb0843..c7e519bfdc8a 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -240,7 +241,7 @@ static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
* @size: size of the memory region
*/
struct smem_region {
- u32 aux_base;
+ phys_addr_t aux_base;
void __iomem *virt_base;
size_t size;
};
@@ -499,7 +500,7 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
for (i = 0; i < smem->num_regions; i++) {
region = &smem->regions[i];
- if (region->aux_base == aux_base || !aux_base) {
+ if ((u32)region->aux_base == aux_base || !aux_base) {
if (size != NULL)
*size = le32_to_cpu(entry->size);
return region->virt_base + le32_to_cpu(entry->offset);
@@ -664,7 +665,7 @@ phys_addr_t qcom_smem_virt_to_phys(void *p)
if (p < region->virt_base + region->size) {
u64 offset = p - region->virt_base;
- return (phys_addr_t)region->aux_base + offset;
+ return region->aux_base + offset;
}
}
@@ -863,12 +864,12 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
return 0;
}
-static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
- const char *name, int i)
+static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
+ struct smem_region *region)
{
+ struct device *dev = smem->dev;
struct device_node *np;
struct resource r;
- resource_size_t size;
int ret;
np = of_parse_phandle(dev->of_node, name, 0);
@@ -881,13 +882,9 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
of_node_put(np);
if (ret)
return ret;
- size = resource_size(&r);
- smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
- if (!smem->regions[i].virt_base)
- return -ENOMEM;
- smem->regions[i].aux_base = (u32)r.start;
- smem->regions[i].size = size;
+ region->aux_base = r.start;
+ region->size = resource_size(&r);
return 0;
}
@@ -895,12 +892,14 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
static int qcom_smem_probe(struct platform_device *pdev)
{
struct smem_header *header;
+ struct reserved_mem *rmem;
struct qcom_smem *smem;
size_t array_size;
int num_regions;
int hwlock_id;
u32 version;
int ret;
+ int i;
num_regions = 1;
if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
@@ -914,13 +913,35 @@ static int qcom_smem_probe(struct platform_device *pdev)
smem->dev = &pdev->dev;
smem->num_regions = num_regions;
- ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
- if (ret)
- return ret;
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (rmem) {
+ smem->regions[0].aux_base = rmem->base;
+ smem->regions[0].size = rmem->size;
+ } else {
+ /*
+ * Fall back to the memory-region reference, if we're not a
+ * reserved-memory node.
+ */
+ ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
+ if (ret)
+ return ret;
+ }
- if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
- "qcom,rpm-msg-ram", 1)))
- return ret;
+ if (num_regions > 1) {
+ ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_regions; i++) {
+ smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
+ smem->regions[i].aux_base,
+ smem->regions[i].size);
+ if (!smem->regions[i].virt_base) {
+ dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
+ return -ENOMEM;
+ }
+ }
header = smem->regions[0].virt_base;
if (le32_to_cpu(header->initialized) != 1 ||
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 2df488333be9..4a157240f419 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -14,6 +14,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
@@ -40,8 +41,11 @@
#define SMP2P_MAX_ENTRY_NAME 16
#define SMP2P_FEATURE_SSR_ACK 0x1
+#define SMP2P_FLAGS_RESTART_DONE_BIT 0
+#define SMP2P_FLAGS_RESTART_ACK_BIT 1
#define SMP2P_MAGIC 0x504d5324
+#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK
/**
* struct smp2p_smem_item - in memory communication structure
@@ -135,6 +139,10 @@ struct qcom_smp2p {
unsigned valid_entries;
+ bool ssr_ack_enabled;
+ bool ssr_ack;
+ bool negotiation_done;
+
unsigned local_pid;
unsigned remote_pid;
@@ -162,22 +170,53 @@ static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
}
}
-/**
- * qcom_smp2p_intr() - interrupt handler for incoming notifications
- * @irq: unused
- * @data: smp2p driver context
- *
- * Handle notifications from the remote side to handle newly allocated entries
- * or any changes to the state bits of existing entries.
- */
-static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *in = smp2p->in;
+ bool restart;
+
+ if (!smp2p->ssr_ack_enabled)
+ return false;
+
+ restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);
+
+ return restart != smp2p->ssr_ack;
+}
+
+static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ u32 val;
+
+ smp2p->ssr_ack = !smp2p->ssr_ack;
+
+ val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ if (smp2p->ssr_ack)
+ val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ out->flags = val;
+
+ qcom_smp2p_kick(smp2p);
+}
+
+static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ struct smp2p_smem_item *in = smp2p->in;
+
+ if (in->version == out->version) {
+ out->features &= in->features;
+
+ if (out->features & SMP2P_FEATURE_SSR_ACK)
+ smp2p->ssr_ack_enabled = true;
+
+ smp2p->negotiation_done = true;
+ }
+}
+
+static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
struct smp2p_smem_item *in;
struct smp2p_entry *entry;
- struct qcom_smp2p *smp2p = data;
- unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
- unsigned pid = smp2p->remote_pid;
- size_t size;
int irq_pin;
u32 status;
char buf[SMP2P_MAX_ENTRY_NAME];
@@ -186,18 +225,6 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data)
in = smp2p->in;
- /* Acquire smem item, if not already found */
- if (!in) {
- in = qcom_smem_get(pid, smem_id, &size);
- if (IS_ERR(in)) {
- dev_err(smp2p->dev,
- "Unable to acquire remote smp2p item\n");
- return IRQ_HANDLED;
- }
-
- smp2p->in = in;
- }
-
/* Match newly created entries */
for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
list_for_each_entry(entry, &smp2p->inbound, node) {
@@ -236,7 +263,51 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data)
}
}
}
+}
+
+/**
+ * qcom_smp2p_intr() - interrupt handler for incoming notifications
+ * @irq: unused
+ * @data: smp2p driver context
+ *
+ * Handle notifications from the remote side about newly allocated entries
+ * or any changes to the state bits of existing entries.
+ */
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+ struct smp2p_smem_item *in;
+ struct qcom_smp2p *smp2p = data;
+ unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
+ unsigned int pid = smp2p->remote_pid;
+ bool ack_restart;
+ size_t size;
+
+ in = smp2p->in;
+
+ /* Acquire smem item, if not already found */
+ if (!in) {
+ in = qcom_smem_get(pid, smem_id, &size);
+ if (IS_ERR(in)) {
+ dev_err(smp2p->dev,
+ "Unable to acquire remote smp2p item\n");
+ goto out;
+ }
+
+ smp2p->in = in;
+ }
+
+ if (!smp2p->negotiation_done)
+ qcom_smp2p_negotiate(smp2p);
+
+ if (smp2p->negotiation_done) {
+ ack_restart = qcom_smp2p_check_ssr(smp2p);
+ qcom_smp2p_notify_in(smp2p);
+
+ if (ack_restart)
+ qcom_smp2p_do_ssr_ack(smp2p);
+ }
+out:
return IRQ_HANDLED;
}
@@ -392,6 +463,7 @@ static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
out->remote_pid = smp2p->remote_pid;
out->total_entries = SMP2P_MAX_ENTRY;
out->valid_entries = 0;
+ out->features = SMP2P_ALL_FEATURES;
/*
* Make sure the rest of the header is written before we validate the
@@ -501,6 +573,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
if (!entry) {
ret = -ENOMEM;
+ of_node_put(node);
goto unwind_interfaces;
}
@@ -508,19 +581,25 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
spin_lock_init(&entry->lock);
ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
goto unwind_interfaces;
+ }
if (of_property_read_bool(node, "interrupt-controller")) {
ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
goto unwind_interfaces;
+ }
list_add(&entry->node, &smp2p->inbound);
} else {
ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
goto unwind_interfaces;
+ }
list_add(&entry->node, &smp2p->outbound);
}
@@ -538,9 +617,26 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
goto unwind_interfaces;
}
+ /*
+	 * Treat the smp2p interrupt as a wakeup source, but keep it disabled
+	 * by default. User space can decide whether to enable it depending on
+	 * its use cases. For example, if a remoteproc crashes and the device
+	 * wants to handle it immediately (e.g. to not miss phone calls), user
+	 * space can enable the wakeup source. Other devices without a proper
+	 * autosleep feature may prefer to handle it together with other wakeup
+	 * events (e.g. the power button) instead of waking up immediately.
+ */
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (ret)
+ goto set_wake_irq_fail;
return 0;
+set_wake_irq_fail:
+ dev_pm_clear_wake_irq(&pdev->dev);
+
unwind_interfaces:
list_for_each_entry(entry, &smp2p->inbound, node)
irq_domain_remove(entry->domain);
@@ -565,6 +661,8 @@ static int qcom_smp2p_remove(struct platform_device *pdev)
struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
struct smp2p_entry *entry;
+ dev_pm_clear_wake_irq(&pdev->dev);
+
list_for_each_entry(entry, &smp2p->inbound, node)
irq_domain_remove(entry->domain);
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 52e581167115..9a0eb59405e8 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -87,8 +87,8 @@ static const char *const pmic_models[] = {
[15] = "PM8901",
[16] = "PM8950/PM8027",
[17] = "PMI8950/ISL9519",
- [18] = "PM8921",
- [19] = "PM8018",
+ [18] = "PMK8001/PM8921",
+ [19] = "PMI8996/PM8018",
[20] = "PM8998/PM8015",
[21] = "PMI8998/PM8014",
[22] = "PM8821",
@@ -102,6 +102,8 @@ static const char *const pmic_models[] = {
[32] = "PM8150B",
[33] = "PMK8002",
[36] = "PM8009",
+ [38] = "PM8150C",
+ [41] = "SMB2351",
};
#endif /* CONFIG_DEBUG_FS */
@@ -281,19 +283,31 @@ static const struct soc_id soc_id[] = {
{ 319, "APQ8098" },
{ 321, "SDM845" },
{ 322, "MDM9206" },
+ { 323, "IPQ8074" },
{ 324, "SDA660" },
{ 325, "SDM658" },
{ 326, "SDA658" },
{ 327, "SDA630" },
{ 338, "SDM450" },
{ 341, "SDA845" },
+ { 342, "IPQ8072" },
+ { 343, "IPQ8076" },
+ { 344, "IPQ8078" },
{ 345, "SDM636" },
{ 346, "SDA636" },
{ 349, "SDM632" },
{ 350, "SDA632" },
{ 351, "SDA450" },
{ 356, "SM8250" },
+ { 375, "IPQ8070" },
+ { 376, "IPQ8071" },
+ { 389, "IPQ8072A" },
+ { 390, "IPQ8074A" },
+ { 391, "IPQ8076A" },
+ { 392, "IPQ8078A" },
{ 394, "SM6125" },
+ { 395, "IPQ8070A" },
+ { 396, "IPQ8071A" },
{ 402, "IPQ6018" },
{ 403, "IPQ6028" },
{ 421, "IPQ6000" },
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
new file mode 100644
index 000000000000..f831420b7fd4
--- /dev/null
+++ b/drivers/soc/qcom/spm.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * SAW power controller driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/spm.h>
+
+#define SPM_CTL_INDEX 0x7f
+#define SPM_CTL_INDEX_SHIFT 4
+#define SPM_CTL_EN BIT(0)
+
+enum spm_reg {
+ SPM_REG_CFG,
+ SPM_REG_SPM_CTL,
+ SPM_REG_DLY,
+ SPM_REG_PMIC_DLY,
+ SPM_REG_PMIC_DATA_0,
+ SPM_REG_PMIC_DATA_1,
+ SPM_REG_VCTL,
+ SPM_REG_SEQ_ENTRY,
+ SPM_REG_SPM_STS,
+ SPM_REG_PMIC_STS,
+ SPM_REG_AVS_CTL,
+ SPM_REG_AVS_LIMIT,
+ SPM_REG_NR,
+};
+
+static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = {
+ [SPM_REG_AVS_CTL] = 0x904,
+ [SPM_REG_AVS_LIMIT] = 0x908,
+};
+
+static const struct spm_reg_data spm_reg_660_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_660_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x101c031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_8998_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4700470,
+};
+
+static const struct spm_reg_data spm_reg_8998_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4200420,
+};
+
+static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x400,
+};
+
+/* SPM register data for 8916 */
+static const struct spm_reg_data spm_reg_8916_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v2_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+ 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+ 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+/* SPM register data for 8226 */
+static const struct spm_reg_data spm_reg_8226_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x0,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x20,
+ [SPM_REG_PMIC_DLY] = 0x24,
+ [SPM_REG_PMIC_DATA_0] = 0x28,
+ [SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+ .reg_offset = spm_reg_offset_v1_1,
+ .spm_cfg = 0x1F,
+ .pmic_dly = 0x02020004,
+ .pmic_data[0] = 0x0084009C,
+ .pmic_data[1] = 0x00A4001C,
+ .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+ 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ if (drv->reg_data->reg_offset[reg])
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+}
+
+/* Ensure a guaranteed write before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ u32 ret;
+
+ if (!drv->reg_data->reg_offset[reg])
+ return;
+
+ do {
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ ret = readl_relaxed(drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ if (ret == val)
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+ enum spm_reg reg)
+{
+ return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode)
+{
+ u32 start_index;
+ u32 ctl_val;
+
+ start_index = drv->reg_data->start_index[mode];
+
+ ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+ ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+ ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+ ctl_val |= SPM_CTL_EN;
+ spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
+
+static const struct of_device_id spm_match_table[] = {
+ { .compatible = "qcom,sdm660-gold-saw2-v4.1-l2",
+ .data = &spm_reg_660_gold_l2 },
+ { .compatible = "qcom,sdm660-silver-saw2-v4.1-l2",
+ .data = &spm_reg_660_silver_l2 },
+ { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
+ .data = &spm_reg_8226_cpu },
+ { .compatible = "qcom,msm8916-saw2-v3.0-cpu",
+ .data = &spm_reg_8916_cpu },
+ { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,msm8998-gold-saw2-v4.1-l2",
+ .data = &spm_reg_8998_gold_l2 },
+ { .compatible = "qcom,msm8998-silver-saw2-v4.1-l2",
+ .data = &spm_reg_8998_silver_l2 },
+ { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+ .data = &spm_reg_8064_cpu },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spm_match_table);
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match_id;
+ struct spm_driver_data *drv;
+ struct resource *res;
+ void __iomem *addr;
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->reg_base))
+ return PTR_ERR(drv->reg_base);
+
+ match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+ if (!match_id)
+ return -ENODEV;
+
+ drv->reg_data = match_id->data;
+ platform_set_drvdata(pdev, drv);
+
+ /* Write the SPM sequences first.. */
+ addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+ __iowrite32_copy(addr, drv->reg_data->seq,
+ ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+ /*
+ * ..and then the control registers.
+ * On some SoCs, if the control registers are written first while the
+ * CPU is held in reset, the reset signal could trigger the SPM state
+ * machine before the sequences are completely written.
+ */
+ spm_register_write(drv, SPM_REG_AVS_CTL, drv->reg_data->avs_ctl);
+ spm_register_write(drv, SPM_REG_AVS_LIMIT, drv->reg_data->avs_limit);
+ spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+ spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+ drv->reg_data->pmic_data[0]);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+ drv->reg_data->pmic_data[1]);
+
+ /* Set up Standby as the default low power mode */
+ if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL])
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ return 0;
+}
+
+static struct platform_driver spm_driver = {
+ .probe = spm_dev_probe,
+ .driver = {
+ .name = "qcom_spm",
+ .of_match_table = spm_match_table,
+ },
+};
+
+static int __init qcom_spm_init(void)
+{
+ return platform_driver_register(&spm_driver);
+}
+arch_initcall(qcom_spm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 07e0ecd64319..ce16ef5c939c 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -186,6 +186,7 @@ config ARCH_R8A77995
select SYSC_R8A77995
help
This enables support for the Renesas R-Car D3 SoC.
+ This includes different gradings like R-Car D3e.
config ARCH_R8A77990
bool "ARM64 Platform support for R-Car E3"
@@ -193,6 +194,7 @@ config ARCH_R8A77990
select SYSC_R8A77990
help
This enables support for the Renesas R-Car E3 SoC.
+ This includes different gradings like R-Car E3e.
config ARCH_R8A77950
bool "ARM64 Platform support for R-Car H3 ES1.x"
@@ -208,7 +210,7 @@ config ARCH_R8A77951
help
This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
later).
- This includes different gradings like R-Car H3e-2G.
+ This includes different gradings like R-Car H3e, H3e-2G, and H3Ne.
config ARCH_R8A77965
bool "ARM64 Platform support for R-Car M3-N"
@@ -216,6 +218,7 @@ config ARCH_R8A77965
select SYSC_R8A77965
help
This enables support for the Renesas R-Car M3-N SoC.
+ This includes different gradings like R-Car M3Ne and M3Ne-2G.
config ARCH_R8A77960
bool "ARM64 Platform support for R-Car M3-W"
@@ -230,7 +233,7 @@ config ARCH_R8A77961
select SYSC_R8A77961
help
This enables support for the Renesas R-Car M3-W+ SoC.
- This includes different gradings like R-Car M3e-2G.
+ This includes different gradings like R-Car M3e and M3e-2G.
config ARCH_R8A77980
bool "ARM64 Platform support for R-Car V3H"
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index dab9f5a0aad0..7961b0be1850 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -285,17 +285,22 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77951
+ { .compatible = "renesas,r8a779m0", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m1", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779m8", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
#endif
#ifdef CONFIG_ARCH_R8A77961
{ .compatible = "renesas,r8a77961", .data = &soc_rcar_m3_w },
+ { .compatible = "renesas,r8a779m2", .data = &soc_rcar_m3_w },
{ .compatible = "renesas,r8a779m3", .data = &soc_rcar_m3_w },
#endif
#ifdef CONFIG_ARCH_R8A77965
{ .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
+ { .compatible = "renesas,r8a779m4", .data = &soc_rcar_m3_n },
+ { .compatible = "renesas,r8a779m5", .data = &soc_rcar_m3_n },
#endif
#ifdef CONFIG_ARCH_R8A77970
{ .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
@@ -305,9 +310,11 @@ static const struct of_device_id renesas_socs[] __initconst = {
#endif
#ifdef CONFIG_ARCH_R8A77990
{ .compatible = "renesas,r8a77990", .data = &soc_rcar_e3 },
+ { .compatible = "renesas,r8a779m6", .data = &soc_rcar_e3 },
#endif
#ifdef CONFIG_ARCH_R8A77995
{ .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
+ { .compatible = "renesas,r8a779m7", .data = &soc_rcar_d3 },
#endif
#ifdef CONFIG_ARCH_R8A779A0
{ .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 5745d7e5908e..e2cedef1e8d1 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -13,18 +13,21 @@ config EXYNOS_ASV_ARM
depends on EXYNOS_CHIPID
config EXYNOS_CHIPID
- bool "Exynos ChipID controller and ASV driver" if COMPILE_TEST
+ tristate "Exynos ChipID controller and ASV driver"
depends on ARCH_EXYNOS || COMPILE_TEST
+ default ARCH_EXYNOS
select EXYNOS_ASV_ARM if ARM && ARCH_EXYNOS
select MFD_SYSCON
select SOC_BUS
help
Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage.
+	  This driver can also be built as a module (exynos_chipid).
config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
+ select MFD_CORE
# There is no need to enable these drivers for ARMv8
config EXYNOS_PMU_ARM_DRIVERS
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 0c523a8de4eb..2ae4bea804cf 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
-obj-$(CONFIG_EXYNOS_CHIPID)	+= exynos-chipid.o exynos-asv.o
+obj-$(CONFIG_EXYNOS_CHIPID)	+= exynos_chipid.o
+exynos_chipid-y			+= exynos-chipid.o exynos-asv.o
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 5c1d0f97f766..a28053ec7e6a 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -15,7 +15,9 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -24,6 +26,17 @@
#include "exynos-asv.h"
+struct exynos_chipid_variant {
+ unsigned int rev_reg; /* revision register offset */
+ unsigned int main_rev_shift; /* main revision offset in rev_reg */
+ unsigned int sub_rev_shift; /* sub revision offset in rev_reg */
+};
+
+struct exynos_chipid_info {
+ u32 product_id;
+ u32 revision;
+};
+
static const struct exynos_soc_id {
const char *name;
unsigned int id;
@@ -42,6 +55,8 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
+ { "EXYNOS850", 0xE3830000 },
+ { "EXYNOSAUTOV9", 0xAAA80000 },
};
static const char *product_id_to_soc_id(unsigned int product_id)
@@ -49,31 +64,57 @@ static const char *product_id_to_soc_id(unsigned int product_id)
int i;
for (i = 0; i < ARRAY_SIZE(soc_ids); i++)
- if ((product_id & EXYNOS_MASK) == soc_ids[i].id)
+ if (product_id == soc_ids[i].id)
return soc_ids[i].name;
return NULL;
}
+static int exynos_chipid_get_chipid_info(struct regmap *regmap,
+ const struct exynos_chipid_variant *data,
+ struct exynos_chipid_info *soc_info)
+{
+ int ret;
+ unsigned int val, main_rev, sub_rev;
+
+ ret = regmap_read(regmap, EXYNOS_CHIPID_REG_PRO_ID, &val);
+ if (ret < 0)
+ return ret;
+ soc_info->product_id = val & EXYNOS_MASK;
+
+ if (data->rev_reg != EXYNOS_CHIPID_REG_PRO_ID) {
+ ret = regmap_read(regmap, data->rev_reg, &val);
+ if (ret < 0)
+ return ret;
+ }
+ main_rev = (val >> data->main_rev_shift) & EXYNOS_REV_PART_MASK;
+ sub_rev = (val >> data->sub_rev_shift) & EXYNOS_REV_PART_MASK;
+ soc_info->revision = (main_rev << EXYNOS_REV_PART_SHIFT) | sub_rev;
+
+ return 0;
+}
+
static int exynos_chipid_probe(struct platform_device *pdev)
{
+ const struct exynos_chipid_variant *drv_data;
+ struct exynos_chipid_info soc_info;
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device_node *root;
struct regmap *regmap;
- u32 product_id;
- u32 revision;
int ret;
+ drv_data = of_device_get_match_data(&pdev->dev);
+ if (!drv_data)
+ return -EINVAL;
+
regmap = device_node_to_regmap(pdev->dev.of_node);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- ret = regmap_read(regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id);
+ ret = exynos_chipid_get_chipid_info(regmap, drv_data, &soc_info);
if (ret < 0)
return ret;
- revision = product_id & EXYNOS_REV_MASK;
-
soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
GFP_KERNEL);
if (!soc_dev_attr)
@@ -86,8 +127,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
of_node_put(root);
soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%x", revision);
- soc_dev_attr->soc_id = product_id_to_soc_id(product_id);
+ "%x", soc_info.revision);
+ soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");
return -ENODEV;
@@ -104,9 +145,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, soc_dev);
- dev_info(soc_device_to_device(soc_dev),
- "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
- soc_dev_attr->soc_id, product_id, revision);
+ dev_info(&pdev->dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ soc_dev_attr->soc_id, soc_info.product_id, soc_info.revision);
return 0;
@@ -125,10 +165,29 @@ static int exynos_chipid_remove(struct platform_device *pdev)
return 0;
}
+static const struct exynos_chipid_variant exynos4210_chipid_drv_data = {
+ .rev_reg = 0x0,
+ .main_rev_shift = 4,
+ .sub_rev_shift = 0,
+};
+
+static const struct exynos_chipid_variant exynos850_chipid_drv_data = {
+ .rev_reg = 0x10,
+ .main_rev_shift = 20,
+ .sub_rev_shift = 16,
+};
+
static const struct of_device_id exynos_chipid_of_device_ids[] = {
- { .compatible = "samsung,exynos4210-chipid" },
- {}
+ {
+ .compatible = "samsung,exynos4210-chipid",
+ .data = &exynos4210_chipid_drv_data,
+ }, {
+ .compatible = "samsung,exynos850-chipid",
+ .data = &exynos850_chipid_drv_data,
+ },
+ { }
};
+MODULE_DEVICE_TABLE(of, exynos_chipid_of_device_ids);
static struct platform_driver exynos_chipid_driver = {
.driver = {
@@ -138,4 +197,11 @@ static struct platform_driver exynos_chipid_driver = {
.probe = exynos_chipid_probe,
.remove = exynos_chipid_remove,
};
-builtin_platform_driver(exynos_chipid_driver);
+module_platform_driver(exynos_chipid_driver);
+
+MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>");
+MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
index ca409a976e34..475ae5276529 100644
--- a/drivers/soc/samsung/exynos5422-asv.c
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -503,3 +503,4 @@ int exynos5422_asv_init(struct exynos_asv *asv)
return 0;
}
+EXPORT_SYMBOL_GPL(exynos5422_asv_init);
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
index 5ec0c13f0aaf..d07f3c9d6903 100644
--- a/drivers/soc/samsung/pm_domains.c
+++ b/drivers/soc/samsung/pm_domains.c
@@ -28,7 +28,6 @@ struct exynos_pm_domain_config {
*/
struct exynos_pm_domain {
void __iomem *base;
- bool is_off;
struct generic_pm_domain pd;
u32 local_pwr_cfg;
};
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 42833e33a96c..a8f3876963a0 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -331,7 +331,6 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
static int sunxi_sram_probe(struct platform_device *pdev)
{
- struct resource *res;
struct dentry *d;
struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
@@ -342,8 +341,7 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
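
The conversion above is a common cleanup: devm_platform_ioremap_resource() combines platform_get_resource() and devm_ioremap_resource() into a single call. A minimal probe sketch of the resulting idiom, using a hypothetical driver name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	void __iomem *base;

	/* maps the first MEM resource, with devres-managed unmapping */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
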
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 9c809c1814bd..054e862b63d8 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
obj-$(CONFIG_SOC_TEGRA_POWERGATE_BPMP) += powergate-bpmp.o
obj-$(CONFIG_SOC_TEGRA20_VOLTAGE_COUPLER) += regulators-tegra20.o
obj-$(CONFIG_SOC_TEGRA30_VOLTAGE_COUPLER) += regulators-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += ari-tegra186.o
diff --git a/drivers/soc/tegra/ari-tegra186.c b/drivers/soc/tegra/ari-tegra186.c
new file mode 100644
index 000000000000..02577853ec49
--- /dev/null
+++ b/drivers/soc/tegra/ari-tegra186.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/panic_notifier.h>
+
+#define SMC_SIP_INVOKE_MCE 0xc2ffff00
+#define MCE_SMC_READ_MCA 12
+
+#define MCA_ARI_CMD_RD_SERR 1
+
+#define MCA_ARI_RW_SUBIDX_STAT 1
+#define SERR_STATUS_VAL BIT_ULL(63)
+
+#define MCA_ARI_RW_SUBIDX_ADDR 2
+#define MCA_ARI_RW_SUBIDX_MSC1 3
+#define MCA_ARI_RW_SUBIDX_MSC2 4
+
+static const char * const bank_names[] = {
+ "SYS:DPMU", "ROC:IOB", "ROC:MCB", "ROC:CCE", "ROC:CQX", "ROC:CTU",
+};
+
+static void read_uncore_mca(u8 cmd, u8 idx, u8 subidx, u8 inst, u64 *data)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(SMC_SIP_INVOKE_MCE | MCE_SMC_READ_MCA,
+ ((u64)inst << 24) | ((u64)idx << 16) |
+ ((u64)subidx << 8) | ((u64)cmd << 0),
+ 0, 0, 0, 0, 0, 0, &res);
+
+ *data = res.a2;
+}
+
+static int tegra186_ari_panic_handler(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ u64 status;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bank_names); i++) {
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i, MCA_ARI_RW_SUBIDX_STAT,
+ 0, &status);
+
+ if (status & SERR_STATUS_VAL) {
+ u64 addr, misc1, misc2;
+
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_ADDR, 0, &addr);
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_MSC1, 0, &misc1);
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_MSC2, 0, &misc2);
+
+ pr_crit("Machine Check Error in %s\n"
+ " status=0x%llx addr=0x%llx\n"
+ " msc1=0x%llx msc2=0x%llx\n",
+ bank_names[i], status, addr, misc1, misc2);
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tegra186_ari_panic_nb = {
+ .notifier_call = tegra186_ari_panic_handler,
+};
+
+static int __init tegra186_ari_init(void)
+{
+ if (of_machine_is_compatible("nvidia,tegra186"))
+ atomic_notifier_chain_register(&panic_notifier_list, &tegra186_ari_panic_nb);
+
+ return 0;
+}
+early_initcall(tegra186_ari_init);
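
The second SMC argument built in read_uncore_mca() packs the ARI command word as inst[31:24] | idx[23:16] | subidx[15:8] | cmd[7:0]. A stand-alone sketch of just that packing, using the command and sub-index constants defined in this file:

#include <stdint.h>
#include <stdio.h>

#define MCA_ARI_CMD_RD_SERR	1
#define MCA_ARI_RW_SUBIDX_STAT	1

static uint64_t mca_arg(uint8_t cmd, uint8_t idx, uint8_t subidx, uint8_t inst)
{
	return ((uint64_t)inst << 24) | ((uint64_t)idx << 16) |
	       ((uint64_t)subidx << 8) | ((uint64_t)cmd << 0);
}

int main(void)
{
	/* read the SERR status of bank 2 ("ROC:MCB"), instance 0 */
	printf("0x%llx\n", (unsigned long long)
	       mca_arg(MCA_ARI_CMD_RD_SERR, 2, MCA_ARI_RW_SUBIDX_STAT, 0));
	return 0;
}
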
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 50091c4ec948..575d6d5b4294 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -360,6 +360,7 @@ struct tegra_pmc_soc {
unsigned int num_pmc_clks;
bool has_blink_output;
bool has_usb_sleepwalk;
+ bool supports_core_domain;
};
/**
@@ -782,7 +783,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
err = reset_control_deassert(pg->reset);
if (err)
- goto powergate_off;
+ goto disable_clks;
usleep_range(10, 20);
@@ -2815,8 +2816,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
return err;
/* take over the memory region from the early initialization */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -3041,6 +3041,7 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_pmc_soc tegra20_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra20_powergates),
.powergates = tegra20_powergates,
.num_cpu_powergates = 0,
@@ -3065,7 +3066,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = true,
- .has_usb_sleepwalk = false,
+ .has_usb_sleepwalk = true,
};
static const char * const tegra30_powergates[] = {
@@ -3101,6 +3102,7 @@ static const char * const tegra30_reset_sources[] = {
};
static const struct tegra_pmc_soc tegra30_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra30_powergates),
.powergates = tegra30_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates),
@@ -3125,7 +3127,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
- .has_usb_sleepwalk = false,
+ .has_usb_sleepwalk = true,
};
static const char * const tegra114_powergates[] = {
@@ -3157,6 +3159,7 @@ static const u8 tegra114_cpu_powergates[] = {
};
static const struct tegra_pmc_soc tegra114_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra114_powergates),
.powergates = tegra114_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra114_cpu_powergates),
@@ -3181,7 +3184,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
- .has_usb_sleepwalk = false,
+ .has_usb_sleepwalk = true,
};
static const char * const tegra124_powergates[] = {
@@ -3273,6 +3276,7 @@ static const struct pinctrl_pin_desc tegra124_pin_descs[] = {
};
static const struct tegra_pmc_soc tegra124_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra124_powergates),
.powergates = tegra124_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra124_cpu_powergates),
@@ -3398,6 +3402,7 @@ static const struct tegra_wake_event tegra210_wake_events[] = {
};
static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra210_powergates),
.powergates = tegra210_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra210_cpu_powergates),
@@ -3555,6 +3560,7 @@ static const struct tegra_wake_event tegra186_wake_events[] = {
};
static const struct tegra_pmc_soc tegra186_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = 0,
.powergates = NULL,
.num_cpu_powergates = 0,
@@ -3689,6 +3695,7 @@ static const struct tegra_wake_event tegra194_wake_events[] = {
};
static const struct tegra_pmc_soc tegra194_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = 0,
.powergates = NULL,
.num_cpu_powergates = 0,
@@ -3757,6 +3764,7 @@ static const char * const tegra234_reset_sources[] = {
};
static const struct tegra_pmc_soc tegra234_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = 0,
.powergates = NULL,
.num_cpu_powergates = 0,
@@ -3804,6 +3812,14 @@ static void tegra_pmc_sync_state(struct device *dev)
int err;
/*
+ * Newer device-trees have power domains, but we need to prepare all
+ * device drivers with runtime PM and OPP support first, otherwise
+ * state syncing is unsafe.
+ */
+ if (!pmc->soc->supports_core_domain)
+ return;
+
+ /*
* Older device-trees don't have core PD, and thus, there are
* no dependencies that will block the state syncing. We shouldn't
* mark the domain as synced in this case.
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 3aa33ea9e6a6..66b8a17f14c4 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -4,8 +4,9 @@ optee-objs += core.o
optee-objs += call.o
optee-objs += rpc.o
optee-objs += supp.o
-optee-objs += shm_pool.o
optee-objs += device.o
+optee-objs += smc_abi.o
+optee-objs += ffa_abi.o
# for tracing framework to find optee_trace.h
-CFLAGS_call.o := -I$(src)
+CFLAGS_smc_abi.o := -I$(src)
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 945f03da0223..b25cc1fac945 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -1,29 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
-#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include "optee_private.h"
-#include "optee_smc.h"
-#define CREATE_TRACE_POINTS
-#include "optee_trace.h"
-struct optee_call_waiter {
- struct list_head list_node;
- struct completion c;
-};
-
-static void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
{
/*
* We're preparing to make a call to secure world. In case we can't
@@ -47,8 +36,8 @@ static void optee_cq_wait_init(struct optee_call_queue *cq,
mutex_unlock(&cq->mutex);
}
-static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
{
wait_for_completion(&w->c);
@@ -74,8 +63,8 @@ static void optee_cq_complete_one(struct optee_call_queue *cq)
}
}
-static void optee_cq_wait_final(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
{
/*
* We're done with the call to secure world. The thread in secure
@@ -115,97 +104,35 @@ static struct optee_session *find_session(struct optee_context_data *ctxdata,
return NULL;
}
-/**
- * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
- * @ctx: calling context
- * @parg: physical address of message to pass to secure world
- *
- * Does and SMC to OP-TEE in secure world and handles eventual resulting
- * Remote Procedure Calls (RPC) from OP-TEE.
- *
- * Returns return code from secure world, 0 is OK
- */
-u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
+ struct optee_msg_arg **msg_arg)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- struct optee_call_waiter w;
- struct optee_rpc_param param = { };
- struct optee_call_ctx call_ctx = { };
- u32 ret;
-
- param.a0 = OPTEE_SMC_CALL_WITH_ARG;
- reg_pair_from_64(&param.a1, &param.a2, parg);
- /* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
- while (true) {
- struct arm_smccc_res res;
-
- trace_optee_invoke_fn_begin(&param);
- optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
- param.a4, param.a5, param.a6, param.a7,
- &res);
- trace_optee_invoke_fn_end(&param, &res);
-
- if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
- /*
- * Out of threads in secure world, wait for a thread
- * become available.
- */
- optee_cq_wait_for_completion(&optee->call_queue, &w);
- } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- cond_resched();
- param.a0 = res.a0;
- param.a1 = res.a1;
- param.a2 = res.a2;
- param.a3 = res.a3;
- optee_handle_rpc(ctx, &param, &call_ctx);
- } else {
- ret = res.a0;
- break;
- }
- }
+ size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
+ struct tee_shm *shm;
+ struct optee_msg_arg *ma;
- optee_rpc_finalize_call(&call_ctx);
/*
- * We're done with our thread in secure world, if there's any
- * thread waiters wake up one.
+ * rpc_arg_count is set to the number of allocated parameters in
+ * the RPC argument struct if a second MSG arg struct is expected.
+ * The second arg struct will then be used for RPC.
*/
- optee_cq_wait_final(&optee->call_queue, &w);
-
- return ret;
-}
+ if (optee->rpc_arg_count)
+ sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count);
-static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
- struct optee_msg_arg **msg_arg,
- phys_addr_t *msg_parg)
-{
- int rc;
- struct tee_shm *shm;
- struct optee_msg_arg *ma;
-
- shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (IS_ERR(shm))
return shm;
ma = tee_shm_get_va(shm, 0);
if (IS_ERR(ma)) {
- rc = PTR_ERR(ma);
- goto out;
+ tee_shm_free(shm);
+ return (void *)ma;
}
- rc = tee_shm_get_pa(shm, 0, msg_parg);
- if (rc)
- goto out;
-
memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
ma->num_params = num_params;
*msg_arg = ma;
-out:
- if (rc) {
- tee_shm_free(shm);
- return ERR_PTR(rc);
- }
return shm;
}
@@ -214,16 +141,16 @@ int optee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
int rc;
struct tee_shm *shm;
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
struct optee_session *sess = NULL;
uuid_t client_uuid;
/* +2 for the meta parameters added below */
- shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
+ shm = optee_get_msg_arg(ctx, arg->num_params + 2, &msg_arg);
if (IS_ERR(shm))
return PTR_ERR(shm);
@@ -247,7 +174,8 @@ int optee_open_session(struct tee_context *ctx,
goto out;
export_uuid(msg_arg->params[1].u.octets, &client_uuid);
- rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
+ rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
+ arg->num_params, param);
if (rc)
goto out;
@@ -257,7 +185,7 @@ int optee_open_session(struct tee_context *ctx,
goto out;
}
- if (optee_do_call_with_arg(ctx, msg_parg)) {
+ if (optee->ops->do_call_with_arg(ctx, shm)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -272,7 +200,8 @@ int optee_open_session(struct tee_context *ctx,
kfree(sess);
}
- if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params + 2)) {
arg->ret = TEEC_ERROR_COMMUNICATION;
arg->ret_origin = TEEC_ORIGIN_COMMS;
/* Close session again to avoid leakage */
@@ -288,12 +217,28 @@ out:
return rc;
}
-int optee_close_session(struct tee_context *ctx, u32 session)
+int optee_close_session_helper(struct tee_context *ctx, u32 session)
{
- struct optee_context_data *ctxdata = ctx->data;
struct tee_shm *shm;
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
+
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ msg_arg->session = session;
+ optee->ops->do_call_with_arg(ctx, shm);
+
+ tee_shm_free(shm);
+
+ return 0;
+}
+
+int optee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
/* Check that the session is valid and remove it from the list */
@@ -306,25 +251,16 @@ int optee_close_session(struct tee_context *ctx, u32 session)
return -EINVAL;
kfree(sess);
- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
-
- msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
- msg_arg->session = session;
- optee_do_call_with_arg(ctx, msg_parg);
-
- tee_shm_free(shm);
- return 0;
+ return optee_close_session_helper(ctx, session);
}
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct tee_param *param)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
struct tee_shm *shm;
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
struct optee_session *sess;
int rc;
@@ -335,7 +271,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
if (!sess)
return -EINVAL;
- shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
+ shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg);
if (IS_ERR(shm))
return PTR_ERR(shm);
msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
@@ -343,16 +279,18 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
msg_arg->session = arg->session;
msg_arg->cancel_id = arg->cancel_id;
- rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
+ rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
+ param);
if (rc)
goto out;
- if (optee_do_call_with_arg(ctx, msg_parg)) {
+ if (optee->ops->do_call_with_arg(ctx, shm)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
- if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -366,10 +304,10 @@ out:
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
struct tee_shm *shm;
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
struct optee_session *sess;
/* Check that the session is valid */
@@ -379,195 +317,19 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
if (!sess)
return -EINVAL;
- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
if (IS_ERR(shm))
return PTR_ERR(shm);
msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
msg_arg->session = session;
msg_arg->cancel_id = cancel_id;
- optee_do_call_with_arg(ctx, msg_parg);
+ optee->ops->do_call_with_arg(ctx, shm);
tee_shm_free(shm);
return 0;
}
-/**
- * optee_enable_shm_cache() - Enables caching of some shared memory allocation
- * in OP-TEE
- * @optee: main service struct
- */
-void optee_enable_shm_cache(struct optee *optee)
-{
- struct optee_call_waiter w;
-
- /* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
- while (true) {
- struct arm_smccc_res res;
-
- optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
- 0, &res);
- if (res.a0 == OPTEE_SMC_RETURN_OK)
- break;
- optee_cq_wait_for_completion(&optee->call_queue, &w);
- }
- optee_cq_wait_final(&optee->call_queue, &w);
-}
-
-/**
- * __optee_disable_shm_cache() - Disables caching of some shared memory
- * allocation in OP-TEE
- * @optee: main service struct
- * @is_mapped: true if the cached shared memory addresses were mapped by this
- * kernel, are safe to dereference, and should be freed
- */
-static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
-{
- struct optee_call_waiter w;
-
- /* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
- while (true) {
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_disable_shm_cache_result result;
- } res;
-
- optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
- 0, &res.smccc);
- if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
- break; /* All shm's freed */
- if (res.result.status == OPTEE_SMC_RETURN_OK) {
- struct tee_shm *shm;
-
- /*
- * Shared memory references that were not mapped by
- * this kernel must be ignored to prevent a crash.
- */
- if (!is_mapped)
- continue;
-
- shm = reg_pair_to_ptr(res.result.shm_upper32,
- res.result.shm_lower32);
- tee_shm_free(shm);
- } else {
- optee_cq_wait_for_completion(&optee->call_queue, &w);
- }
- }
- optee_cq_wait_final(&optee->call_queue, &w);
-}
-
-/**
- * optee_disable_shm_cache() - Disables caching of mapped shared memory
- * allocations in OP-TEE
- * @optee: main service struct
- */
-void optee_disable_shm_cache(struct optee *optee)
-{
- return __optee_disable_shm_cache(optee, true);
-}
-
-/**
- * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
- * allocations in OP-TEE which are not
- * currently mapped
- * @optee: main service struct
- */
-void optee_disable_unmapped_shm_cache(struct optee *optee)
-{
- return __optee_disable_shm_cache(optee, false);
-}
-
-#define PAGELIST_ENTRIES_PER_PAGE \
- ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
-
-/**
- * optee_fill_pages_list() - write list of user pages to given shared
- * buffer.
- *
- * @dst: page-aligned buffer where list of pages will be stored
- * @pages: array of pages that represents shared buffer
- * @num_pages: number of entries in @pages
- * @page_offset: offset of user buffer from page start
- *
- * @dst should be big enough to hold list of user page addresses and
- * links to the next pages of buffer
- */
-void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
- size_t page_offset)
-{
- int n = 0;
- phys_addr_t optee_page;
- /*
- * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
- * for details.
- */
- struct {
- u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
- u64 next_page_data;
- } *pages_data;
-
- /*
- * Currently OP-TEE uses 4k page size and it does not looks
- * like this will change in the future. On other hand, there are
- * no know ARM architectures with page size < 4k.
- * Thus the next built assert looks redundant. But the following
- * code heavily relies on this assumption, so it is better be
- * safe than sorry.
- */
- BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
-
- pages_data = (void *)dst;
- /*
- * If linux page is bigger than 4k, and user buffer offset is
- * larger than 4k/8k/12k/etc this will skip first 4k pages,
- * because they bear no value data for OP-TEE.
- */
- optee_page = page_to_phys(*pages) +
- round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
-
- while (true) {
- pages_data->pages_list[n++] = optee_page;
-
- if (n == PAGELIST_ENTRIES_PER_PAGE) {
- pages_data->next_page_data =
- virt_to_phys(pages_data + 1);
- pages_data++;
- n = 0;
- }
-
- optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
- if (!(optee_page & ~PAGE_MASK)) {
- if (!--num_pages)
- break;
- pages++;
- optee_page = page_to_phys(*pages);
- }
- }
-}
-
-/*
- * The final entry in each pagelist page is a pointer to the next
- * pagelist page.
- */
-static size_t get_pages_list_size(size_t num_entries)
-{
- int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
-
- return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
-}
-
-u64 *optee_allocate_pages_list(size_t num_entries)
-{
- return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
-}
-
-void optee_free_pages_list(void *list, size_t num_entries)
-{
- free_pages_exact(list, get_pages_list_size(num_entries));
-}
-
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
@@ -591,7 +353,7 @@ static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
return -EINVAL;
}
-static int check_mem_type(unsigned long start, size_t num_pages)
+int optee_check_mem_type(unsigned long start, size_t num_pages)
{
struct mm_struct *mm = current->mm;
int rc;
@@ -610,94 +372,3 @@ static int check_mem_type(unsigned long start, size_t num_pages)
return rc;
}
-
-int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start)
-{
- struct tee_shm *shm_arg = NULL;
- struct optee_msg_arg *msg_arg;
- u64 *pages_list;
- phys_addr_t msg_parg;
- int rc;
-
- if (!num_pages)
- return -EINVAL;
-
- rc = check_mem_type(start, num_pages);
- if (rc)
- return rc;
-
- pages_list = optee_allocate_pages_list(num_pages);
- if (!pages_list)
- return -ENOMEM;
-
- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
- if (IS_ERR(shm_arg)) {
- rc = PTR_ERR(shm_arg);
- goto out;
- }
-
- optee_fill_pages_list(pages_list, pages, num_pages,
- tee_shm_get_page_offset(shm));
-
- msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
- msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
- OPTEE_MSG_ATTR_NONCONTIG;
- msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
- msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
- /*
- * In the least bits of msg_arg->params->u.tmem.buf_ptr we
- * store buffer offset from 4k page, as described in OP-TEE ABI.
- */
- msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
- (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
-
- if (optee_do_call_with_arg(ctx, msg_parg) ||
- msg_arg->ret != TEEC_SUCCESS)
- rc = -EINVAL;
-
- tee_shm_free(shm_arg);
-out:
- optee_free_pages_list(pages_list, num_pages);
- return rc;
-}
-
-int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
-{
- struct tee_shm *shm_arg;
- struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
- int rc = 0;
-
- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
- if (IS_ERR(shm_arg))
- return PTR_ERR(shm_arg);
-
- msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
-
- msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
- msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
-
- if (optee_do_call_with_arg(ctx, msg_parg) ||
- msg_arg->ret != TEEC_SUCCESS)
- rc = -EINVAL;
- tee_shm_free(shm_arg);
- return rc;
-}
-
-int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start)
-{
- /*
- * We don't want to register supplicant memory in OP-TEE.
- * Instead information about it will be passed in RPC code.
- */
- return check_mem_type(start, num_pages);
-}
-
-int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
-{
- return 0;
-}
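
The bulk of this change removes the SMC-specific plumbing from call.c and routes calls through optee->ops, so the same session/invoke code can sit on top of either the SMC transport or the FF-A transport added by this series. A generic, stand-alone sketch of that kind of ops-table indirection (all names below are invented for the illustration; they are not the driver's actual API):

#include <stdio.h>

struct msg_arg;					/* stand-in for the shared message buffer */

struct abi_ops {				/* one table per transport */
	int (*do_call_with_arg)(struct msg_arg *arg);
};

static int smc_do_call(struct msg_arg *arg) { (void)arg; puts("call via SMC");  return 0; }
static int ffa_do_call(struct msg_arg *arg) { (void)arg; puts("call via FF-A"); return 0; }

static const struct abi_ops smc_ops = { .do_call_with_arg = smc_do_call };
static const struct abi_ops ffa_ops = { .do_call_with_arg = ffa_do_call };

/* transport-agnostic caller, analogous to optee_open_session() and friends */
static int invoke(const struct abi_ops *ops, struct msg_arg *arg)
{
	return ops->do_call_with_arg(arg);
}

int main(void)
{
	invoke(&smc_ops, NULL);
	invoke(&ffa_ops, NULL);
	return 0;
}
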
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 5363ebebfc35..ab2edfcc6c70 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -1,215 +1,71 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/arm-smccc.h>
#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "optee_private.h"
-#include "optee_smc.h"
-#include "shm_pool.h"
-#define DRIVER_NAME "optee"
-
-#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
-
-/**
- * optee_from_msg_param() - convert from OPTEE_MSG parameters to
- * struct tee_param
- * @params: subsystem internal parameter representation
- * @num_params: number of elements in the parameter arrays
- * @msg_params: OPTEE_MSG parameters
- * Returns 0 on success or <0 on failure
- */
-int optee_from_msg_param(struct tee_param *params, size_t num_params,
- const struct optee_msg_param *msg_params)
+int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start))
{
- int rc;
- size_t n;
- struct tee_shm *shm;
- phys_addr_t pa;
-
- for (n = 0; n < num_params; n++) {
- struct tee_param *p = params + n;
- const struct optee_msg_param *mp = msg_params + n;
- u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
-
- switch (attr) {
- case OPTEE_MSG_ATTR_TYPE_NONE:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
- memset(&p->u, 0, sizeof(p->u));
- break;
- case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
- case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
- case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
- attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
- p->u.value.a = mp->u.value.a;
- p->u.value.b = mp->u.value.b;
- p->u.value.c = mp->u.value.c;
- break;
- case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
- case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
- case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
- attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
- p->u.memref.size = mp->u.tmem.size;
- shm = (struct tee_shm *)(unsigned long)
- mp->u.tmem.shm_ref;
- if (!shm) {
- p->u.memref.shm_offs = 0;
- p->u.memref.shm = NULL;
- break;
- }
- rc = tee_shm_get_pa(shm, 0, &pa);
- if (rc)
- return rc;
- p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
- p->u.memref.shm = shm;
- break;
- case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
- case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
- case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
- attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
- p->u.memref.size = mp->u.rmem.size;
- shm = (struct tee_shm *)(unsigned long)
- mp->u.rmem.shm_ref;
-
- if (!shm) {
- p->u.memref.shm_offs = 0;
- p->u.memref.shm = NULL;
- break;
- }
- p->u.memref.shm_offs = mp->u.rmem.offs;
- p->u.memref.shm = shm;
+ unsigned int order = get_order(size);
+ struct page *page;
+ int rc = 0;
- break;
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
- default:
- return -EINVAL;
- }
- }
- return 0;
-}
+ shm->kaddr = page_address(page);
+ shm->paddr = page_to_phys(page);
+ shm->size = PAGE_SIZE << order;
-static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
- const struct tee_param *p)
-{
- int rc;
- phys_addr_t pa;
+ if (shm_register) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
- mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
- mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
- mp->u.tmem.size = p->u.memref.size;
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = page;
+ page++;
+ }
- if (!p->u.memref.shm) {
- mp->u.tmem.buf_ptr = 0;
- return 0;
+ shm->flags |= TEE_SHM_REGISTER;
+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ (unsigned long)shm->kaddr);
+ kfree(pages);
+ if (rc)
+ goto err;
}
- rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
- if (rc)
- return rc;
-
- mp->u.tmem.buf_ptr = pa;
- mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
- OPTEE_MSG_ATTR_CACHE_SHIFT;
-
return 0;
-}
-
-static int to_msg_param_reg_mem(struct optee_msg_param *mp,
- const struct tee_param *p)
-{
- mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
- mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
- mp->u.rmem.size = p->u.memref.size;
- mp->u.rmem.offs = p->u.memref.shm_offs;
- return 0;
-}
-
-/**
- * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
- * @msg_params: OPTEE_MSG parameters
- * @num_params: number of elements in the parameter arrays
- * @params: subsystem itnernal parameter representation
- * Returns 0 on success or <0 on failure
- */
-int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
- const struct tee_param *params)
-{
- int rc;
- size_t n;
-
- for (n = 0; n < num_params; n++) {
- const struct tee_param *p = params + n;
- struct optee_msg_param *mp = msg_params + n;
-
- switch (p->attr) {
- case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
- mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
- memset(&mp->u, 0, sizeof(mp->u));
- break;
- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
- mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
- mp->u.value.a = p->u.value.a;
- mp->u.value.b = p->u.value.b;
- mp->u.value.c = p->u.value.c;
- break;
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- if (tee_shm_is_registered(p->u.memref.shm))
- rc = to_msg_param_reg_mem(mp, p);
- else
- rc = to_msg_param_tmp_mem(mp, p);
- if (rc)
- return rc;
- break;
- default:
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static void optee_get_version(struct tee_device *teedev,
- struct tee_ioctl_version_data *vers)
-{
- struct tee_ioctl_version_data v = {
- .impl_id = TEE_IMPL_ID_OPTEE,
- .impl_caps = TEE_OPTEE_CAP_TZ,
- .gen_caps = TEE_GEN_CAP_GP,
- };
- struct optee *optee = tee_get_drvdata(teedev);
-
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
- v.gen_caps |= TEE_GEN_CAP_REG_MEM;
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
- v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
- *vers = v;
+err:
+ __free_pages(page, order);
+ return rc;
}
static void optee_bus_scan(struct work_struct *work)
@@ -217,7 +73,7 @@ static void optee_bus_scan(struct work_struct *work)
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}
-static int optee_open(struct tee_context *ctx)
+int optee_open(struct tee_context *ctx, bool cap_memref_null)
{
struct optee_context_data *ctxdata;
struct tee_device *teedev = ctx->teedev;
@@ -255,347 +111,55 @@ static int optee_open(struct tee_context *ctx)
mutex_init(&ctxdata->mutex);
INIT_LIST_HEAD(&ctxdata->sess_list);
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
- ctx->cap_memref_null = true;
- else
- ctx->cap_memref_null = false;
-
+ ctx->cap_memref_null = cap_memref_null;
ctx->data = ctxdata;
return 0;
}
-static void optee_release(struct tee_context *ctx)
+static void optee_release_helper(struct tee_context *ctx,
+ int (*close_session)(struct tee_context *ctx,
+ u32 session))
{
struct optee_context_data *ctxdata = ctx->data;
- struct tee_device *teedev = ctx->teedev;
- struct optee *optee = tee_get_drvdata(teedev);
- struct tee_shm *shm;
- struct optee_msg_arg *arg = NULL;
- phys_addr_t parg;
struct optee_session *sess;
struct optee_session *sess_tmp;
if (!ctxdata)
return;
- shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg),
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
- if (!IS_ERR(shm)) {
- arg = tee_shm_get_va(shm, 0);
- /*
- * If va2pa fails for some reason, we can't call into
- * secure world, only free the memory. Secure OS will leak
- * sessions and finally refuse more sessions, but we will
- * at least let normal world reclaim its memory.
- */
- if (!IS_ERR(arg))
- if (tee_shm_va2pa(shm, arg, &parg))
- arg = NULL; /* prevent usage of parg below */
- }
-
list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
list_node) {
list_del(&sess->list_node);
- if (!IS_ERR_OR_NULL(arg)) {
- memset(arg, 0, sizeof(*arg));
- arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
- arg->session = sess->session_id;
- optee_do_call_with_arg(ctx, parg);
- }
+ close_session(ctx, sess->session_id);
kfree(sess);
}
kfree(ctxdata);
-
- if (!IS_ERR(shm))
- tee_shm_free(shm);
-
ctx->data = NULL;
-
- if (teedev == optee->supp_teedev) {
- if (optee->scan_bus_wq) {
- destroy_workqueue(optee->scan_bus_wq);
- optee->scan_bus_wq = NULL;
- }
- optee_supp_release(&optee->supp);
- }
-}
-
-static const struct tee_driver_ops optee_ops = {
- .get_version = optee_get_version,
- .open = optee_open,
- .release = optee_release,
- .open_session = optee_open_session,
- .close_session = optee_close_session,
- .invoke_func = optee_invoke_func,
- .cancel_req = optee_cancel_req,
- .shm_register = optee_shm_register,
- .shm_unregister = optee_shm_unregister,
-};
-
-static const struct tee_desc optee_desc = {
- .name = DRIVER_NAME "-clnt",
- .ops = &optee_ops,
- .owner = THIS_MODULE,
-};
-
-static const struct tee_driver_ops optee_supp_ops = {
- .get_version = optee_get_version,
- .open = optee_open,
- .release = optee_release,
- .supp_recv = optee_supp_recv,
- .supp_send = optee_supp_send,
- .shm_register = optee_shm_register_supp,
- .shm_unregister = optee_shm_unregister_supp,
-};
-
-static const struct tee_desc optee_supp_desc = {
- .name = DRIVER_NAME "-supp",
- .ops = &optee_supp_ops,
- .owner = THIS_MODULE,
- .flags = TEE_DESC_PRIVILEGED,
-};
-
-static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
-{
- struct arm_smccc_res res;
-
- invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
-
- if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
- res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
- return true;
- return false;
-}
-
-static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
-{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_call_get_os_revision_result result;
- } res = {
- .result = {
- .build_id = 0
- }
- };
-
- invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
- &res.smccc);
-
- if (res.result.build_id)
- pr_info("revision %lu.%lu (%08lx)", res.result.major,
- res.result.minor, res.result.build_id);
- else
- pr_info("revision %lu.%lu", res.result.major, res.result.minor);
-}
-
-static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
-{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_calls_revision_result result;
- } res;
-
- invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
-
- if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
- (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
- return true;
- return false;
-}
-
-static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
- u32 *sec_caps)
-{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_exchange_capabilities_result result;
- } res;
- u32 a1 = 0;
-
- /*
- * TODO This isn't enough to tell if it's UP system (from kernel
- * point of view) or not, is_smp() returns the the information
- * needed, but can't be called directly from here.
- */
- if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
- a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
-
- invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
- &res.smccc);
-
- if (res.result.status != OPTEE_SMC_RETURN_OK)
- return false;
-
- *sec_caps = res.result.capabilities;
- return true;
-}
-
-static struct tee_shm_pool *optee_config_dyn_shm(void)
-{
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
-
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc))
- return rc;
- priv_mgr = rc;
-
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- return rc;
- }
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
- }
-
- return rc;
}
-static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+void optee_release(struct tee_context *ctx)
{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_get_shm_config_result result;
- } res;
- unsigned long vaddr;
- phys_addr_t paddr;
- size_t size;
- phys_addr_t begin;
- phys_addr_t end;
- void *va;
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
- const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
- invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
- if (res.result.status != OPTEE_SMC_RETURN_OK) {
- pr_err("static shm service not available\n");
- return ERR_PTR(-ENOENT);
- }
-
- if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
- pr_err("only normal cached shared memory supported\n");
- return ERR_PTR(-EINVAL);
- }
-
- begin = roundup(res.result.start, PAGE_SIZE);
- end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
- paddr = begin;
- size = end - begin;
-
- if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
- pr_err("too small shared memory area\n");
- return ERR_PTR(-EINVAL);
- }
-
- va = memremap(paddr, size, MEMREMAP_WB);
- if (!va) {
- pr_err("shared memory ioremap failed\n");
- return ERR_PTR(-EINVAL);
- }
- vaddr = (unsigned long)va;
-
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
- 3 /* 8 bytes aligned */);
- if (IS_ERR(rc))
- goto err_memunmap;
- priv_mgr = rc;
-
- vaddr += sz;
- paddr += sz;
- size -= sz;
-
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
- if (IS_ERR(rc))
- goto err_free_priv_mgr;
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc))
- goto err_free_dmabuf_mgr;
-
- *memremaped_shm = va;
-
- return rc;
-
-err_free_dmabuf_mgr:
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
-err_free_priv_mgr:
- tee_shm_pool_mgr_destroy(priv_mgr);
-err_memunmap:
- memunmap(va);
- return rc;
+ optee_release_helper(ctx, optee_close_session_helper);
}
-/* Simple wrapper functions to be able to use a function pointer */
-static void optee_smccc_smc(unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5,
- unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res)
+void optee_release_supp(struct tee_context *ctx)
{
- arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
-}
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
-static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5,
- unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res)
-{
- arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
-}
-
-static optee_invoke_fn *get_invoke_func(struct device *dev)
-{
- const char *method;
-
- pr_info("probing for conduit method.\n");
-
- if (device_property_read_string(dev, "method", &method)) {
- pr_warn("missing \"method\" property\n");
- return ERR_PTR(-ENXIO);
+ optee_release_helper(ctx, optee_close_session_helper);
+ if (optee->scan_bus_wq) {
+ destroy_workqueue(optee->scan_bus_wq);
+ optee->scan_bus_wq = NULL;
}
-
- if (!strcmp("hvc", method))
- return optee_smccc_hvc;
- else if (!strcmp("smc", method))
- return optee_smccc_smc;
-
- pr_warn("invalid \"method\" property: %s\n", method);
- return ERR_PTR(-EINVAL);
+ optee_supp_release(&optee->supp);
}
-/* optee_remove - Device Removal Routine
- * @pdev: platform device information struct
- *
- * optee_remove is called by platform subsystem to alert the driver
- * that it should release the device
- */
-
-static int optee_remove(struct platform_device *pdev)
+void optee_remove_common(struct optee *optee)
{
- struct optee *optee = platform_get_drvdata(pdev);
-
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
/*
- * Ask OP-TEE to free all cached shared memory objects to decrease
- * reference counters and also avoid wild pointers in secure world
- * into the old shared memory range.
- */
- optee_disable_shm_cache(optee);
-
- /*
* The two devices have to be unregistered before we can free the
* other resources.
*/
@@ -603,39 +167,16 @@ static int optee_remove(struct platform_device *pdev)
tee_device_unregister(optee->teedev);
tee_shm_pool_free(optee->pool);
- if (optee->memremaped_shm)
- memunmap(optee->memremaped_shm);
optee_wait_queue_exit(&optee->wait_queue);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
-
- kfree(optee);
-
- return 0;
}
-/* optee_shutdown - Device Removal Routine
- * @pdev: platform device information struct
- *
- * platform_shutdown is called by the platform subsystem to alert
- * the driver that a shutdown, reboot, or kexec is happening and
- * device must be disabled.
- */
-static void optee_shutdown(struct platform_device *pdev)
-{
- optee_disable_shm_cache(platform_get_drvdata(pdev));
-}
+static int smc_abi_rc;
+static int ffa_abi_rc;
-static int optee_probe(struct platform_device *pdev)
+static int optee_core_init(void)
{
- optee_invoke_fn *invoke_fn;
- struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
- struct optee *optee = NULL;
- void *memremaped_shm = NULL;
- struct tee_device *teedev;
- u32 sec_caps;
- int rc;
-
/*
* The kernel may have crashed at the same time that all available
* secure world threads were suspended and we cannot reschedule the
@@ -646,138 +187,24 @@ static int optee_probe(struct platform_device *pdev)
if (is_kdump_kernel())
return -ENODEV;
- invoke_fn = get_invoke_func(&pdev->dev);
- if (IS_ERR(invoke_fn))
- return PTR_ERR(invoke_fn);
-
- if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
- pr_warn("api uid mismatch\n");
- return -EINVAL;
- }
-
- optee_msg_get_os_revision(invoke_fn);
-
- if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
- pr_warn("api revision mismatch\n");
- return -EINVAL;
- }
-
- if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
- pr_warn("capabilities mismatch\n");
- return -EINVAL;
- }
-
- /*
- * Try to use dynamic shared memory if possible
- */
- if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
- pool = optee_config_dyn_shm();
-
- /*
- * If dynamic shared memory is not available or failed - try static one
- */
- if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
- pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
-
- if (IS_ERR(pool))
- return PTR_ERR(pool);
-
- optee = kzalloc(sizeof(*optee), GFP_KERNEL);
- if (!optee) {
- rc = -ENOMEM;
- goto err;
- }
-
- optee->invoke_fn = invoke_fn;
- optee->sec_caps = sec_caps;
-
- teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
- if (IS_ERR(teedev)) {
- rc = PTR_ERR(teedev);
- goto err;
- }
- optee->teedev = teedev;
-
- teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
- if (IS_ERR(teedev)) {
- rc = PTR_ERR(teedev);
- goto err;
- }
- optee->supp_teedev = teedev;
-
- rc = tee_device_register(optee->teedev);
- if (rc)
- goto err;
-
- rc = tee_device_register(optee->supp_teedev);
- if (rc)
- goto err;
-
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
- optee_wait_queue_init(&optee->wait_queue);
- optee_supp_init(&optee->supp);
- optee->memremaped_shm = memremaped_shm;
- optee->pool = pool;
-
- /*
- * Ensure that there are no pre-existing shm objects before enabling
- * the shm cache so that there's no chance of receiving an invalid
- * address during shutdown. This could occur, for example, if we're
- * kexec booting from an older kernel that did not properly cleanup the
- * shm cache.
- */
- optee_disable_unmapped_shm_cache(optee);
-
- optee_enable_shm_cache(optee);
+ smc_abi_rc = optee_smc_abi_register();
+ ffa_abi_rc = optee_ffa_abi_register();
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
- pr_info("dynamic shared memory is enabled\n");
-
- platform_set_drvdata(pdev, optee);
-
- rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_remove(pdev);
- return rc;
- }
-
- pr_info("initialized driver\n");
+ /* If both failed there's no point in keeping this module */
+ if (smc_abi_rc && ffa_abi_rc)
+ return smc_abi_rc;
return 0;
-err:
- if (optee) {
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
- tee_device_unregister(optee->supp_teedev);
- tee_device_unregister(optee->teedev);
- kfree(optee);
- }
- if (pool)
- tee_shm_pool_free(pool);
- if (memremaped_shm)
- memunmap(memremaped_shm);
- return rc;
}
+module_init(optee_core_init);
-static const struct of_device_id optee_dt_match[] = {
- { .compatible = "linaro,optee-tz" },
- {},
-};
-MODULE_DEVICE_TABLE(of, optee_dt_match);
-
-static struct platform_driver optee_driver = {
- .probe = optee_probe,
- .remove = optee_remove,
- .shutdown = optee_shutdown,
- .driver = {
- .name = "optee",
- .of_match_table = optee_dt_match,
- },
-};
-module_platform_driver(optee_driver);
+static void optee_core_exit(void)
+{
+ if (!smc_abi_rc)
+ optee_smc_abi_unregister();
+ if (!ffa_abi_rc)
+ optee_ffa_abi_unregister();
+}
+module_exit(optee_core_exit);
MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
new file mode 100644
index 000000000000..45424824e0f9
--- /dev/null
+++ b/drivers/tee/optee/ffa_abi.c
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/errno.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_private.h"
+#include "optee_ffa.h"
+#include "optee_rpc_cmd.h"
+
+/*
+ * This file implements the FF-A ABI used when communicating with secure world
+ * OP-TEE OS via FF-A.
+ * This file is divided into the following sections:
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ * 3. Low level support functions to register shared memory in secure world
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ * 5. Do a normal scheduled call into secure world
+ * 6. Driver initialization.
+ */
+
+/*
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ *
+ * FF-A assigns a global memory handle for each piece of shared memory.
+ * This handle is then used when communicating with secure world.
+ *
+ * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle()
+ */
+struct shm_rhash {
+ struct tee_shm *shm;
+ u64 global_id;
+ struct rhash_head linkage;
+};
+
+static void rh_free_fn(void *ptr, void *arg)
+{
+ kfree(ptr);
+}
+
+static const struct rhashtable_params shm_rhash_params = {
+ .head_offset = offsetof(struct shm_rhash, linkage),
+ .key_len = sizeof(u64),
+ .key_offset = offsetof(struct shm_rhash, global_id),
+ .automatic_shrinking = true,
+};
+
+static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee,
+ u64 global_id)
+{
+ struct tee_shm *shm = NULL;
+ struct shm_rhash *r;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ shm = r->shm;
+ mutex_unlock(&optee->ffa.mutex);
+
+ return shm;
+}
+
+static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm,
+ u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+ r->shm = shm;
+ r->global_id = global_id;
+
+ mutex_lock(&optee->ffa.mutex);
+ rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage,
+ shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (rc)
+ kfree(r);
+
+ return rc;
+}
+
+static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc = -ENOENT;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ rc = rhashtable_remove_fast(&optee->ffa.global_ids,
+ &r->linkage, shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (!rc)
+ kfree(r);
+
+ return rc;
+}
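+
+/*
+ * A minimal usage sketch of the translation table above (illustrative only;
+ * the real call sites are optee_ffa_shm_register() and the RPC handlers
+ * further down in this file):
+ *
+ *	// record the global handle returned by FFA_MEM_SHARE ...
+ *	rc = optee_shm_add_ffa_handle(optee, shm, global_id);
+ *
+ *	// ... so a handle received from secure world can be resolved later
+ *	shm = optee_shm_from_ffa_handle(optee, global_id);
+ *
+ *	// and drop the translation before the memory is reclaimed
+ *	optee_shm_rem_ffa_handle(optee, global_id);
+ */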
+
+/*
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main
+ * functions.
+ */
+
+static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p,
+ u32 attr, const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm = NULL;
+ u64 offs_high = 0;
+ u64 offs_low = 0;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
+ p->u.memref.size = mp->u.fmem.size;
+
+ if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
+ shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id);
+ p->u.memref.shm = shm;
+
+ if (shm) {
+ offs_low = mp->u.fmem.offs_low;
+ offs_high = mp->u.fmem.offs_high;
+ }
+ p->u.memref.shm_offs = offs_low | offs_high << 32;
+}
+
+/**
+ * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ *
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_from_msg_param(struct optee *optee,
+ struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
+ from_msg_param_ffa_mem(optee, p, attr, mp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int to_msg_param_ffa_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ struct tee_shm *shm = p->u.memref.shm;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ if (shm) {
+ u64 shm_offs = p->u.memref.shm_offs;
+
+ mp->u.fmem.internal_offs = shm->offset;
+
+ mp->u.fmem.offs_low = shm_offs;
+ mp->u.fmem.offs_high = shm_offs >> 32;
+ /* Check that the entire offset could be stored. */
+ if (mp->u.fmem.offs_high != shm_offs >> 32)
+ return -EINVAL;
+
+ mp->u.fmem.global_id = shm->sec_world_id;
+ } else {
+ memset(&mp->u, 0, sizeof(mp->u));
+ mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
+ }
+ mp->u.fmem.size = p->u.memref.size;
+
+ return 0;
+}
+
+/**
+ * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG
+ * parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params,
+ const struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (to_msg_param_ffa_mem(mp, p))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
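+
+/*
+ * A short worked example of the offset encoding used above (a sketch, not
+ * driver code): a memref offset of 0x100000200 is sent to secure world as
+ * offs_low = 0x00000200 and offs_high = 0x1, and is reassembled on the way
+ * back with
+ *
+ *	shm_offs = mp->u.fmem.offs_low | (u64)mp->u.fmem.offs_high << 32;
+ *
+ * Since offs_high is only 16 bits wide in struct optee_msg_param_fmem,
+ * offsets that do not fit are rejected with -EINVAL by
+ * to_msg_param_ffa_mem().
+ */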
+
+/*
+ * 3. Low level support functions to register shared memory in secure world
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
+
+static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ struct ffa_mem_region_attributes mem_attr = {
+ .receiver = ffa_dev->vm_id,
+ .attrs = FFA_MEM_RW,
+ };
+ struct ffa_mem_ops_args args = {
+ .use_txbuf = true,
+ .attrs = &mem_attr,
+ .nattrs = 1,
+ };
+ struct sg_table sgt;
+ int rc;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
+ num_pages * PAGE_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+ args.sg = sgt.sgl;
+ rc = ffa_ops->memory_share(ffa_dev, &args);
+ sg_free_table(&sgt);
+ if (rc)
+ return rc;
+
+ rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
+ if (rc) {
+ ffa_ops->memory_reclaim(args.g_handle, 0);
+ return rc;
+ }
+
+ shm->sec_world_id = args.g_handle;
+
+ return 0;
+}
+
+static int optee_ffa_shm_unregister(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ u64 global_handle = shm->sec_world_id;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_UNREGISTER_SHM,
+ .data1 = (u32)global_handle,
+ .data2 = (u32)(global_handle >> 32)
+ };
+ int rc;
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ shm->sec_world_id = 0;
+
+ rc = ffa_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
+
+ rc = ffa_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ return rc;
+}
+
+static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ u64 global_handle = shm->sec_world_id;
+ int rc;
+
+ /*
+ * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call
+ * since this is OP-TEE freeing via RPC so it has already retired
+ * this ID.
+ */
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ rc = ffa_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ shm->sec_world_id = 0;
+
+ return rc;
+}
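+
+/*
+ * Sketch of the registration flow implemented above, assuming a populated
+ * pages[] array as passed in by the TEE subsystem:
+ *
+ *	sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
+ *				  num_pages * PAGE_SIZE, GFP_KERNEL);
+ *	args.sg = sgt.sgl;
+ *	ffa_ops->memory_share(ffa_dev, &args);	// yields args.g_handle
+ *	optee_shm_add_ffa_handle(optee, shm, args.g_handle);
+ *	shm->sec_world_id = args.g_handle;
+ *
+ * Unregistration is the mirror image: ask OP-TEE to retire the handle with
+ * an OPTEE_FFA_UNREGISTER_SHM direct request and then reclaim the memory
+ * with ffa_ops->memory_reclaim().
+ */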
+
+/*
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool.
+ * The main function is optee_ffa_shm_pool_alloc_pages().
+ */
+
+static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ return optee_pool_op_alloc_helper(poolm, shm, size,
+ optee_ffa_shm_register);
+}
+
+static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ optee_ffa_shm_unregister(shm->ctx, shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_ffa_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ffa_ops = {
+ .alloc = pool_ffa_op_alloc,
+ .free = pool_ffa_op_free,
+ .destroy_poolmgr = pool_ffa_op_destroy_poolmgr,
+};
+
+/**
+ * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used with OP-TEE over FF-A. In this case command buffers
+ * and such are allocated from the kernel's own memory.
+ */
+static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ffa_ops;
+
+ return mgr;
+}
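+
+/*
+ * Design note (a sketch, not normative): the pool manager itself only
+ * carries an ops pointer; page allocation is delegated to the shared helper
+ * with the FF-A registration hook plugged in:
+ *
+ *	return optee_pool_op_alloc_helper(poolm, shm, size,
+ *					  optee_ffa_shm_register);
+ *
+ * which keeps the FF-A pool identical to the SMC one except for how the
+ * pages are made visible to secure world.
+ */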
+
+/*
+ * 5. Do a normal scheduled call into secure world
+ *
+ * The function optee_ffa_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call may normal world request help
+ * from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to for instance allow rescheduling of
+ * the current task.
+ */
+
+static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ arg->params[0] = (struct optee_msg_param){
+ .attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT,
+ .u.fmem.size = tee_shm_get_size(shm),
+ .u.fmem.global_id = shm->sec_world_id,
+ .u.fmem.internal_offs = shm->offset,
+ };
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto err_bad_param;
+
+ shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b);
+ if (!shm)
+ goto err_bad_param;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ goto err_bad_param;
+ }
+ arg->ret = TEEC_SUCCESS;
+ return;
+
+err_bad_param:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
+ struct optee_msg_arg *arg)
+{
+ switch (cmd) {
+ case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
+ handle_ffa_rpc_func_cmd(ctx, arg);
+ break;
+ case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
+ /* Interrupt delivered by now */
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n", cmd);
+ break;
+ }
+}
+
+static int optee_ffa_yielding_call(struct tee_context *ctx,
+ struct ffa_send_direct_data *data,
+ struct optee_msg_arg *rpc_arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ struct optee_call_waiter w;
+ u32 cmd = data->data0;
+ u32 w4 = data->data1;
+ u32 w5 = data->data2;
+ u32 w6 = data->data3;
+ int rc;
+
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ rc = ffa_ops->sync_send_receive(ffa_dev, data);
+ if (rc)
+ goto done;
+
+ switch ((int)data->data0) {
+ case TEEC_SUCCESS:
+ break;
+ case TEEC_ERROR_BUSY:
+ if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) {
+ rc = -EIO;
+ goto done;
+ }
+
+ /*
+ * Out of threads in secure world, wait for a thread
+ * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ data->data0 = cmd;
+ data->data1 = w4;
+ data->data2 = w5;
+ data->data3 = w6;
+ continue;
+ default:
+ rc = -EIO;
+ goto done;
+ }
+
+ if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE)
+ goto done;
+
+ /*
+ * OP-TEE has returned with a RPC request.
+ *
+ * Note that data->data4 (passed in register w7) is already
+ * filled in by ffa_ops->sync_send_receive() returning
+ * above.
+ */
+ cond_resched();
+ optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+ cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
+ data->data0 = cmd;
+ data->data1 = 0;
+ data->data2 = 0;
+ data->data3 = 0;
+ }
+done:
+ /*
+ * We're done with our thread in secure world; if there are any
+ * thread waiters, wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+/**
+ * optee_ffa_do_call_with_arg() - Do an FF-A call to enter OP-TEE in secure world
+ * @ctx: calling context
+ * @shm: shared memory holding the message to pass to secure world
+ *
+ * Does an FF-A call to OP-TEE in secure world and handles any resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from FF-A, 0 is OK
+ */
+
+static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
+ .data1 = (u32)shm->sec_world_id,
+ .data2 = (u32)(shm->sec_world_id >> 32),
+ .data3 = shm->offset,
+ };
+ struct optee_msg_arg *arg = tee_shm_get_va(shm, 0);
+ unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+
+ return optee_ffa_yielding_call(ctx, &data, rpc_arg);
+}
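+
+/*
+ * Layout of the argument shared memory object used above (illustrative):
+ *
+ *	offset 0:				struct optee_msg_arg for the
+ *						yielding call itself
+ *	offset OPTEE_MSG_GET_ARG_SIZE(n):	a second struct optee_msg_arg,
+ *						reserved for RPC requests and
+ *						sized for optee->rpc_arg_count
+ *						parameters
+ *
+ * where n is the number of parameters of the first struct, which is why the
+ * RPC argument is located with
+ *
+ *	rpc_arg = tee_shm_get_va(shm, OPTEE_MSG_GET_ARG_SIZE(arg->num_params));
+ */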
+
+/*
+ * 6. Driver initialization
+ *
+ * During driver initialization the OP-TEE Secure Partition is probed
+ * to find out which features it supports so the driver can be initialized
+ * with a matching configuration.
+ */
+
+static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+ const struct ffa_dev_ops *ops)
+{
+ struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
+ int rc;
+
+ ops->mode_32bit_set(ffa_dev);
+
+ rc = ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data0 != OPTEE_FFA_VERSION_MAJOR ||
+ data.data1 < OPTEE_FFA_VERSION_MINOR) {
+ pr_err("Incompatible OP-TEE API version %lu.%lu",
+ data.data0, data.data1);
+ return false;
+ }
+
+ data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
+ rc = ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data2)
+ pr_info("revision %lu.%lu (%08lx)",
+ data.data0, data.data1, data.data2);
+ else
+ pr_info("revision %lu.%lu", data.data0, data.data1);
+
+ return true;
+}
+
+static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ const struct ffa_dev_ops *ops,
+ unsigned int *rpc_arg_count)
+{
+ struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
+ int rc;
+
+ rc = ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d", rc);
+ return false;
+ }
+ if (data.data0) {
+ pr_err("Unexpected exchange error %lu", data.data0);
+ return false;
+ }
+
+ *rpc_arg_count = (u8)data.data1;
+
+ return true;
+}
+
+static struct tee_shm_pool *optee_ffa_config_dyn_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = optee_ffa_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = optee_ffa_shm_pool_alloc_pages();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
+
+static void optee_ffa_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM |
+ TEE_GEN_CAP_MEMREF_NULL,
+ };
+
+ *vers = v;
+}
+
+static int optee_ffa_open(struct tee_context *ctx)
+{
+ return optee_open(ctx, true);
+}
+
+static const struct tee_driver_ops optee_ffa_clnt_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_ffa_shm_register,
+ .shm_unregister = optee_ffa_shm_unregister,
+};
+
+static const struct tee_desc optee_ffa_clnt_desc = {
+ .name = DRIVER_NAME "-ffa-clnt",
+ .ops = &optee_ffa_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_ffa_supp_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_ffa_shm_register, /* same as for clnt ops */
+ .shm_unregister = optee_ffa_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_ffa_supp_desc = {
+ .name = DRIVER_NAME "-ffa-supp",
+ .ops = &optee_ffa_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ffa_ops = {
+ .do_call_with_arg = optee_ffa_do_call_with_arg,
+ .to_msg_param = optee_ffa_to_msg_param,
+ .from_msg_param = optee_ffa_from_msg_param,
+};
+
+static void optee_ffa_remove(struct ffa_device *ffa_dev)
+{
+ struct optee *optee = ffa_dev->dev.driver_data;
+
+ optee_remove_common(optee);
+
+ mutex_destroy(&optee->ffa.mutex);
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+
+ kfree(optee);
+}
+
+static int optee_ffa_probe(struct ffa_device *ffa_dev)
+{
+ const struct ffa_dev_ops *ffa_ops;
+ unsigned int rpc_arg_count;
+ struct tee_device *teedev;
+ struct optee *optee;
+ int rc;
+
+ ffa_ops = ffa_dev_ops_get(ffa_dev);
+ if (!ffa_ops) {
+ pr_warn("failed \"method\" init: ffa\n");
+ return -ENOENT;
+ }
+
+ if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
+ return -EINVAL;
+
+ if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &rpc_arg_count))
+ return -EINVAL;
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ optee->pool = optee_ffa_config_dyn_shm();
+ if (IS_ERR(optee->pool)) {
+ rc = PTR_ERR(optee->pool);
+ optee->pool = NULL;
+ goto err;
+ }
+
+ optee->ops = &optee_ffa_ops;
+ optee->ffa.ffa_dev = ffa_dev;
+ optee->ffa.ffa_ops = ffa_ops;
+ optee->rpc_arg_count = rpc_arg_count;
+
+ teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err;
+
+ rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
+ if (rc)
+ goto err;
+ mutex_init(&optee->ffa.mutex);
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_wait_queue_init(&optee->wait_queue);
+ optee_supp_init(&optee->supp);
+ ffa_dev_set_drvdata(ffa_dev, optee);
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc) {
+ optee_ffa_remove(ffa_dev);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+ return 0;
+err:
+ /*
+ * tee_device_unregister() is safe to call even if the
+ * devices haven't been registered with
+ * tee_device_register() yet.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+ if (optee->pool)
+ tee_shm_pool_free(optee->pool);
+ kfree(optee);
+ return rc;
+}
+
+static const struct ffa_device_id optee_ffa_device_id[] = {
+ /* 486178e0-e7f8-11e3-bc5e-0002a5d5c51b */
+ { UUID_INIT(0x486178e0, 0xe7f8, 0x11e3,
+ 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) },
+ {}
+};
+
+static struct ffa_driver optee_ffa_driver = {
+ .name = "optee",
+ .probe = optee_ffa_probe,
+ .remove = optee_ffa_remove,
+ .id_table = optee_ffa_device_id,
+};
+
+int optee_ffa_abi_register(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ return ffa_register(&optee_ffa_driver);
+ else
+ return -EOPNOTSUPP;
+}
+
+void optee_ffa_abi_unregister(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ ffa_unregister(&optee_ffa_driver);
+}
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
new file mode 100644
index 000000000000..ee3a03fc392c
--- /dev/null
+++ b/drivers/tee/optee/optee_ffa.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2019-2021, Linaro Limited
+ */
+
+/*
+ * This file is exported by OP-TEE and is kept in sync between secure world
+ * and normal world drivers. We're using the ARM FF-A 1.0 specification.
+ */
+
+#ifndef __OPTEE_FFA_H
+#define __OPTEE_FFA_H
+
+#include <linux/arm_ffa.h>
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 SMC calling convention with register usage as
+ * defined in FF-A specification:
+ * w0: Function ID (0x8400006F or 0x84000070)
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+#define OPTEE_FFA_VERSION_MAJOR 1
+#define OPTEE_FFA_VERSION_MINOR 0
+
+#define OPTEE_FFA_BLOCKING_CALL(id) (id)
+#define OPTEE_FFA_YIELDING_CALL_BIT 31
+#define OPTEE_FFA_YIELDING_CALL(id) ((id) | BIT(OPTEE_FFA_YIELDING_CALL_BIT))
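+
+/*
+ * For illustration, the service IDs defined below therefore resolve to:
+ *	OPTEE_FFA_GET_API_VERSION		= 0
+ *	OPTEE_FFA_GET_OS_VERSION		= 1
+ *	OPTEE_FFA_EXCHANGE_CAPABILITIES		= 2
+ *	OPTEE_FFA_UNREGISTER_SHM		= 3
+ *	OPTEE_FFA_YIELDING_CALL_WITH_ARG	= BIT(31) | 0 = 0x80000000
+ *	OPTEE_FFA_YIELDING_CALL_RESUME		= BIT(31) | 1 = 0x80000001
+ */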
+
+/*
+ * Returns the API version implemented, currently follows the FF-A version.
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_API_VERSION
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: OPTEE_FFA_VERSION_MAJOR
+ * w4: OPTEE_FFA_VERSION_MINOR
+ * w5-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_GET_API_VERSION OPTEE_FFA_BLOCKING_CALL(0)
+
+/*
+ * Returns the revision of OP-TEE.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_OS_VERSION
+ * w4-w7: Unused (MBZ)
+ *
+ * Return register usage:
+ * w3: CFG_OPTEE_REVISION_MAJOR
+ * w4: CFG_OPTEE_REVISION_MINOR
+ * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported)
+ */
+#define OPTEE_FFA_GET_OS_VERSION OPTEE_FFA_BLOCKING_CALL(1)
+
+/*
+ * Exchange capabilities between normal world and secure world.
+ *
+ * Currently there are no defined capabilities. When features are added new
+ * capabilities may be added.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * OPTEE_FFA_YIELDING_CALL_WITH_ARG.
+ * Bit[31:8]: Reserved (MBZ)
+ * w5-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
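+
+/*
+ * On the normal world side the returned parameter count is simply masked
+ * out of w4, as in this sketch of what the driver does in
+ * optee_ffa_exchange_caps():
+ *
+ *	*rpc_arg_count = (u8)data.data1;	// w4, Bit[7:0]
+ */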
+
+/*
+ * Unregister shared memory
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM
+ * w4: Shared memory handle, lower bits
+ * w5: Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
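+
+/*
+ * The 64-bit handle is split over w4/w5 as in this sketch from the driver's
+ * optee_ffa_shm_unregister():
+ *
+ *	data.data1 = (u32)global_handle;		// w4, lower bits
+ *	data.data2 = (u32)(global_handle >> 32);	// w5, higher bits
+ */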
+
+/*
+ * Call with struct optee_msg_arg as argument in the supplied shared memory
+ * with a zero internal offset and normal cached memory attributes.
+ * Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_WITH_ARG
+ * w4: Lower 32 bits of a 64-bit Shared memory handle
+ * w5: Upper 32 bits of a 64-bit Shared memory handle
+ * w6: Offset into shared memory pointing to a struct optee_msg_arg.
+ * Right after the parameters of this struct (at offset
+ * OPTEE_MSG_GET_ARG_SIZE(num_params)) follows a second struct
+ * optee_msg_arg for RPC; this struct has reserved space for the number
+ * of RPC parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ * w7: Not used (MBZ)
+ * Resume from RPC. Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME
+ * w4-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Normal return (yielding call is completed). Register usage:
+ * w3: Error code, 0 on success
+ * w4: OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w7: Not used (MBZ)
+ *
+ * RPC interrupt return (RPC from secure world). Register usage:
+ * w3: Error code == 0
+ * w4: Any defined RPC code but OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Possible error codes in register w3:
+ * 0: Success
+ * FFA_DENIED: w4 isn't one of OPTEE_FFA_YIELDING_CALL_START
+ * or OPTEE_FFA_YIELDING_CALL_RESUME
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_START,
+ * FFA_BUSY: Number of OP-TEE OS threads exceeded,
+ * try again later
+ * FFA_DENIED: RPC shared memory object not found
+ * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_RESUME
+ * FFA_INVALID_PARAMETER: Bad resume info
+ */
+#define OPTEE_FFA_YIELDING_CALL_WITH_ARG OPTEE_FFA_YIELDING_CALL(0)
+#define OPTEE_FFA_YIELDING_CALL_RESUME OPTEE_FFA_YIELDING_CALL(1)
+
+#define OPTEE_FFA_YIELDING_CALL_RETURN_DONE 0
+#define OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD 1
+#define OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT 2
+
+#endif /*__OPTEE_FFA_H*/
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index e3d72d09c484..2422e185d400 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -28,6 +28,9 @@
#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INPUT OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INOUT OPTEE_MSG_ATTR_TYPE_RMEM_INOUT
#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
@@ -96,6 +99,8 @@
*/
#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096
+#define OPTEE_MSG_FMEM_INVALID_GLOBAL_ID 0xffffffffffffffff
+
/**
* struct optee_msg_param_tmem - temporary memory reference parameter
* @buf_ptr: Address of the buffer
@@ -128,6 +133,23 @@ struct optee_msg_param_rmem {
};
/**
+ * struct optee_msg_param_fmem - ffa memory reference parameter
+ * @offs_low: Lower bits of offset into shared memory reference
+ * @offs_high: Upper bits of offset into shared memory reference
+ * @internal_offs: Internal offset into the first page of shared memory
+ * reference
+ * @size: Size of the buffer
+ * @global_id: Global identifier of Shared memory
+ */
+struct optee_msg_param_fmem {
+ u32 offs_low;
+ u16 offs_high;
+ u16 internal_offs;
+ u64 size;
+ u64 global_id;
+};
+
+/**
* struct optee_msg_param_value - opaque value parameter
*
* Value parameters are passed unchecked between normal and secure world.
@@ -143,13 +165,15 @@ struct optee_msg_param_value {
* @attr: attributes
* @tmem: parameter by temporary memory reference
* @rmem: parameter by registered memory reference
+ * @fmem: parameter by ffa registered memory reference
* @value: parameter by opaque value
* @octets: parameter by octet string
*
* @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
* the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
* OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* or the alias OPTEE_MSG_ATTR_TYPE_FMEM_* indicates
+ * @rmem or @fmem depending on the conduit.
* OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
*/
struct optee_msg_param {
@@ -157,6 +181,7 @@ struct optee_msg_param {
union {
struct optee_msg_param_tmem tmem;
struct optee_msg_param_rmem rmem;
+ struct optee_msg_param_fmem fmem;
struct optee_msg_param_value value;
u8 octets[24];
} u;
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index f6bb4a763ba9..6660e05298db 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -1,17 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
#ifndef OPTEE_PRIVATE_H
#define OPTEE_PRIVATE_H
#include <linux/arm-smccc.h>
+#include <linux/rhashtable.h>
#include <linux/semaphore.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_msg.h"
+#define DRIVER_NAME "optee"
+
#define OPTEE_MAX_ARG_SIZE 1024
/* Some Global Platform error codes used in this driver */
@@ -20,6 +23,7 @@
#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_BUSY 0xFFFF000D
#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -29,6 +33,11 @@ typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
struct arm_smccc_res *);
+struct optee_call_waiter {
+ struct list_head list_node;
+ struct completion c;
+};
+
struct optee_call_queue {
/* Serializes access to this struct */
struct mutex mutex;
@@ -66,19 +75,65 @@ struct optee_supp {
struct completion reqs_c;
};
+struct optee_smc {
+ optee_invoke_fn *invoke_fn;
+ void *memremaped_shm;
+ u32 sec_caps;
+};
+
+/**
+ * struct optee_ffa - FF-A communication struct
+ * @ffa_dev: FF-A device, contains the destination id, the id of
+ * OP-TEE in secure world
+ * @ffa_ops: FF-A operations
+ * @mutex: Serializes access to @global_ids
+ * @global_ids: FF-A shared memory global handle translation
+ */
+struct optee_ffa {
+ struct ffa_device *ffa_dev;
+ const struct ffa_dev_ops *ffa_ops;
+ /* Serializes access to @global_ids */
+ struct mutex mutex;
+ struct rhashtable global_ids;
+};
+
+struct optee;
+
+/**
+ * struct optee_ops - OP-TEE driver internal operations
+ * @do_call_with_arg: enters OP-TEE in secure world
+ * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters
+ * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param
+ *
+ * These OPs are only supposed to be used internally in the OP-TEE driver
+ * as a way of abstracting the different methods of entering OP-TEE in
+ * secure world.
+ */
+struct optee_ops {
+ int (*do_call_with_arg)(struct tee_context *ctx,
+ struct tee_shm *shm_arg);
+ int (*to_msg_param)(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params);
+ int (*from_msg_param)(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params);
+};
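+
+/*
+ * Common code reaches secure world only through these hooks, e.g. (sketch):
+ *
+ *	struct optee *optee = tee_get_drvdata(ctx->teedev);
+ *
+ *	rc = optee->ops->do_call_with_arg(ctx, shm_arg);
+ *
+ * so shared code such as rpc.c stays identical for the SMC and FF-A ABIs.
+ */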
+
/**
* struct optee - main service struct
* @supp_teedev: supplicant device
+ * @ops: internal callbacks for different ways to reach secure
+ * world
* @teedev: client device
- * @invoke_fn: function to issue smc or hvc
+ * @smc: specific to SMC ABI
+ * @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
* @wait_queue: queue of threads from secure world waiting for a
* secure world sync object
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
- * @memremaped_shm virtual address of memory in shared memory pool
- * @sec_caps: secure world capabilities defined by
- * OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ * @rpc_arg_count: If > 0, the number of RPC parameters to make room for
 * @scan_bus_done flag if device registration was already done.
* @scan_bus_wq workqueue to scan optee bus and register optee drivers
* @scan_bus_work workq to scan optee bus and register optee drivers
@@ -86,13 +141,16 @@ struct optee_supp {
struct optee {
struct tee_device *supp_teedev;
struct tee_device *teedev;
- optee_invoke_fn *invoke_fn;
+ const struct optee_ops *ops;
+ union {
+ struct optee_smc smc;
+ struct optee_ffa ffa;
+ };
struct optee_call_queue call_queue;
struct optee_wait_queue wait_queue;
struct optee_supp supp;
struct tee_shm_pool *pool;
- void *memremaped_shm;
- u32 sec_caps;
+ unsigned int rpc_arg_count;
bool scan_bus_done;
struct workqueue_struct *scan_bus_wq;
struct work_struct scan_bus_work;
@@ -127,10 +185,6 @@ struct optee_call_ctx {
size_t num_entries;
};
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
- struct optee_call_ctx *call_ctx);
-void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
-
void optee_wait_queue_init(struct optee_wait_queue *wq);
void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -148,43 +202,68 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
struct tee_param *param);
-u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
int optee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
+int optee_close_session_helper(struct tee_context *ctx, u32 session);
int optee_close_session(struct tee_context *ctx, u32 session);
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
-void optee_enable_shm_cache(struct optee *optee);
-void optee_disable_shm_cache(struct optee *optee);
-void optee_disable_unmapped_shm_cache(struct optee *optee);
+#define PTA_CMD_GET_DEVICES 0x0
+#define PTA_CMD_GET_DEVICES_SUPP 0x1
+int optee_enumerate_devices(u32 func);
+void optee_unregister_devices(void);
-int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start);
-int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start));
-int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start);
-int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
-int optee_from_msg_param(struct tee_param *params, size_t num_params,
- const struct optee_msg_param *msg_params);
-int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
- const struct tee_param *params);
+void optee_remove_common(struct optee *optee);
+int optee_open(struct tee_context *ctx, bool cap_memref_null);
+void optee_release(struct tee_context *ctx);
+void optee_release_supp(struct tee_context *ctx);
-u64 *optee_allocate_pages_list(size_t num_entries);
-void optee_free_pages_list(void *array, size_t num_entries);
-void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
- size_t page_offset);
+static inline void optee_from_msg_param_value(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ p->u.value.a = mp->u.value.a;
+ p->u.value.b = mp->u.value.b;
+ p->u.value.c = mp->u.value.c;
+}
-#define PTA_CMD_GET_DEVICES 0x0
-#define PTA_CMD_GET_DEVICES_SUPP 0x1
-int optee_enumerate_devices(u32 func);
-void optee_unregister_devices(void);
+static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ mp->u.value.a = p->u.value.a;
+ mp->u.value.b = p->u.value.b;
+ mp->u.value.c = p->u.value.c;
+}
+
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+int optee_check_mem_type(unsigned long start, size_t num_pages);
+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
+ struct optee_msg_arg **msg_arg);
+
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz);
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg);
/*
* Small helpers
@@ -201,4 +280,10 @@ static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
*reg1 = val;
}
+/* Registration of the ABIs */
+int optee_smc_abi_register(void);
+void optee_smc_abi_unregister(void);
+int optee_ffa_abi_register(void);
+void optee_ffa_abi_unregister(void);
+
#endif /*OPTEE_PRIVATE_H*/
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index efbaff7ad7e5..cd642e340eaf 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -1,17 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
-#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
-#include "optee_smc.h"
#include "optee_rpc_cmd.h"
struct wq_entry {
@@ -55,6 +53,7 @@ bad:
static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
struct optee_msg_arg *arg)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct tee_param *params;
struct i2c_adapter *adapter;
struct i2c_msg msg = { };
@@ -79,7 +78,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
return;
}
- if (optee_from_msg_param(params, arg->num_params, arg->params))
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params))
goto bad;
for (i = 0; i < arg->num_params; i++) {
@@ -122,7 +122,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
arg->ret = TEEC_ERROR_COMMUNICATION;
} else {
params[3].u.value.a = msg.len;
- if (optee_to_msg_param(arg->params, arg->num_params, params))
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params))
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
else
arg->ret = TEEC_SUCCESS;
@@ -234,7 +235,7 @@ bad:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
-static void handle_rpc_supp_cmd(struct tee_context *ctx,
+static void handle_rpc_supp_cmd(struct tee_context *ctx, struct optee *optee,
struct optee_msg_arg *arg)
{
struct tee_param *params;
@@ -248,20 +249,22 @@ static void handle_rpc_supp_cmd(struct tee_context *ctx,
return;
}
- if (optee_from_msg_param(params, arg->num_params, arg->params)) {
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params)) {
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
goto out;
}
arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
- if (optee_to_msg_param(arg->params, arg->num_params, params))
+ if (optee->ops->to_msg_param(optee, arg->params, arg->num_params,
+ params))
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
kfree(params);
}
-static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
{
u32 ret;
struct tee_param param;
@@ -284,103 +287,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
return shm;
}
-static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
- struct optee_msg_arg *arg,
- struct optee_call_ctx *call_ctx)
-{
- phys_addr_t pa;
- struct tee_shm *shm;
- size_t sz;
- size_t n;
-
- arg->ret_origin = TEEC_ORIGIN_COMMS;
-
- if (!arg->num_params ||
- arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
-
- for (n = 1; n < arg->num_params; n++) {
- if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
- }
-
- sz = arg->params[0].u.value.b;
- switch (arg->params[0].u.value.a) {
- case OPTEE_RPC_SHM_TYPE_APPL:
- shm = cmd_alloc_suppl(ctx, sz);
- break;
- case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
- break;
- default:
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
-
- if (IS_ERR(shm)) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- return;
- }
-
- if (tee_shm_get_pa(shm, 0, &pa)) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- goto bad;
- }
-
- sz = tee_shm_get_size(shm);
-
- if (tee_shm_is_registered(shm)) {
- struct page **pages;
- u64 *pages_list;
- size_t page_num;
-
- pages = tee_shm_get_pages(shm, &page_num);
- if (!pages || !page_num) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- goto bad;
- }
-
- pages_list = optee_allocate_pages_list(page_num);
- if (!pages_list) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- goto bad;
- }
-
- call_ctx->pages_list = pages_list;
- call_ctx->num_entries = page_num;
-
- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
- OPTEE_MSG_ATTR_NONCONTIG;
- /*
- * In the least bits of u.tmem.buf_ptr we store buffer offset
- * from 4k page, as described in OP-TEE ABI.
- */
- arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
- (tee_shm_get_page_offset(shm) &
- (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- arg->params[0].u.tmem.size = tee_shm_get_size(shm);
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
-
- optee_fill_pages_list(pages_list, pages, page_num,
- tee_shm_get_page_offset(shm));
- } else {
- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
- arg->params[0].u.tmem.buf_ptr = pa;
- arg->params[0].u.tmem.size = sz;
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
- }
-
- arg->ret = TEEC_SUCCESS;
- return;
-bad:
- tee_shm_free(shm);
-}
-
-static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
{
struct tee_param param;
@@ -405,60 +312,9 @@ static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
}
-static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
- struct optee_msg_arg *arg)
-{
- struct tee_shm *shm;
-
- arg->ret_origin = TEEC_ORIGIN_COMMS;
-
- if (arg->num_params != 1 ||
- arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
-
- shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
- switch (arg->params[0].u.value.a) {
- case OPTEE_RPC_SHM_TYPE_APPL:
- cmd_free_suppl(ctx, shm);
- break;
- case OPTEE_RPC_SHM_TYPE_KERNEL:
- tee_shm_free(shm);
- break;
- default:
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- }
- arg->ret = TEEC_SUCCESS;
-}
-
-static void free_pages_list(struct optee_call_ctx *call_ctx)
-{
- if (call_ctx->pages_list) {
- optee_free_pages_list(call_ctx->pages_list,
- call_ctx->num_entries);
- call_ctx->pages_list = NULL;
- call_ctx->num_entries = 0;
- }
-}
-
-void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
-{
- free_pages_list(call_ctx);
-}
-
-static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
- struct tee_shm *shm,
- struct optee_call_ctx *call_ctx)
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg)
{
- struct optee_msg_arg *arg;
-
- arg = tee_shm_get_va(shm, 0);
- if (IS_ERR(arg)) {
- pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
- return;
- }
-
switch (arg->cmd) {
case OPTEE_RPC_CMD_GET_TIME:
handle_rpc_func_cmd_get_time(arg);
@@ -469,73 +325,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_RPC_CMD_SUSPEND:
handle_rpc_func_cmd_wait(arg);
break;
- case OPTEE_RPC_CMD_SHM_ALLOC:
- free_pages_list(call_ctx);
- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
- break;
- case OPTEE_RPC_CMD_SHM_FREE:
- handle_rpc_func_cmd_shm_free(ctx, arg);
- break;
case OPTEE_RPC_CMD_I2C_TRANSFER:
handle_rpc_func_cmd_i2c_transfer(ctx, arg);
break;
default:
- handle_rpc_supp_cmd(ctx, arg);
+ handle_rpc_supp_cmd(ctx, optee, arg);
}
}
-/**
- * optee_handle_rpc() - handle RPC from secure world
- * @ctx: context doing the RPC
- * @param: value of registers for the RPC
- * @call_ctx: call context. Preserved during one OP-TEE invocation
- *
- * Result of RPC is written back into @param.
- */
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
- struct optee_call_ctx *call_ctx)
-{
- struct tee_device *teedev = ctx->teedev;
- struct optee *optee = tee_get_drvdata(teedev);
- struct tee_shm *shm;
- phys_addr_t pa;
-
- switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
- case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(ctx, param->a1,
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
- if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
- reg_pair_from_64(&param->a1, &param->a2, pa);
- reg_pair_from_64(&param->a4, &param->a5,
- (unsigned long)shm);
- } else {
- param->a1 = 0;
- param->a2 = 0;
- param->a4 = 0;
- param->a5 = 0;
- }
- break;
- case OPTEE_SMC_RPC_FUNC_FREE:
- shm = reg_pair_to_ptr(param->a1, param->a2);
- tee_shm_free(shm);
- break;
- case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
- /*
- * A foreign interrupt was raised while secure world was
- * executing, since they are handled in Linux a dummy RPC is
- * performed to let Linux take the interrupt through the normal
- * vector.
- */
- break;
- case OPTEE_SMC_RPC_FUNC_CMD:
- shm = reg_pair_to_ptr(param->a1, param->a2);
- handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
- break;
- default:
- pr_warn("Unknown RPC func 0x%x\n",
- (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
- break;
- }
- param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
-}
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
deleted file mode 100644
index d167039af519..000000000000
--- a/drivers/tee/optee/shm_pool.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015, Linaro Limited
- * Copyright (c) 2017, EPAM Systems
- */
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/genalloc.h>
-#include <linux/slab.h>
-#include <linux/tee_drv.h>
-#include "optee_private.h"
-#include "optee_smc.h"
-#include "shm_pool.h"
-
-static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm, size_t size)
-{
- unsigned int order = get_order(size);
- struct page *page;
- int rc = 0;
-
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!page)
- return -ENOMEM;
-
- shm->kaddr = page_address(page);
- shm->paddr = page_to_phys(page);
- shm->size = PAGE_SIZE << order;
-
- /*
- * Shared memory private to the OP-TEE driver doesn't need
- * to be registered with OP-TEE.
- */
- if (!(shm->flags & TEE_SHM_PRIV)) {
- unsigned int nr_pages = 1 << order, i;
- struct page **pages;
-
- pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- rc = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < nr_pages; i++) {
- pages[i] = page;
- page++;
- }
-
- shm->flags |= TEE_SHM_REGISTER;
- rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
- (unsigned long)shm->kaddr);
- kfree(pages);
- if (rc)
- goto err;
- }
-
- return 0;
-
-err:
- __free_pages(page, order);
- return rc;
-}
-
-static void pool_op_free(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm)
-{
- if (!(shm->flags & TEE_SHM_PRIV))
- optee_shm_unregister(shm->ctx, shm);
-
- free_pages((unsigned long)shm->kaddr, get_order(shm->size));
- shm->kaddr = NULL;
-}
-
-static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
-{
- kfree(poolm);
-}
-
-static const struct tee_shm_pool_mgr_ops pool_ops = {
- .alloc = pool_op_alloc,
- .free = pool_op_free,
- .destroy_poolmgr = pool_op_destroy_poolmgr,
-};
-
-/**
- * optee_shm_pool_alloc_pages() - create page-based allocator pool
- *
- * This pool is used when OP-TEE supports dymanic SHM. In this case
- * command buffers and such are allocated from kernel's own memory.
- */
-struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
-{
- struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-
- if (!mgr)
- return ERR_PTR(-ENOMEM);
-
- mgr->ops = &pool_ops;
-
- return mgr;
-}
diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h
deleted file mode 100644
index 28109d991c4b..000000000000
--- a/drivers/tee/optee/shm_pool.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015, Linaro Limited
- * Copyright (c) 2016, EPAM Systems
- */
-
-#ifndef SHM_POOL_H
-#define SHM_POOL_H
-
-#include <linux/tee_drv.h>
-
-struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
-
-#endif
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
new file mode 100644
index 000000000000..6196d7c3888f
--- /dev/null
+++ b/drivers/tee/optee/smc_abi.c
@@ -0,0 +1,1362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "optee_rpc_cmd.h"
+#define CREATE_TRACE_POINTS
+#include "optee_trace.h"
+
+/*
+ * This file implements the SMC ABI used when communicating with the secure
+ * world OP-TEE OS via raw SMCs.
+ * This file is divided into the following sections:
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ * 2. Low level support functions to register shared memory in secure world
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ * 4. Do a normal scheduled call into secure world
+ * 5. Driver initialization.
+ */
+
+#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
+
+/*
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_from_msg_param() and optee_to_msg_param() are the main
+ * functions.
+ */
+
+static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+ phys_addr_t pa;
+ int rc;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ p->u.memref.size = mp->u.tmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(shm, 0, &pa);
+ if (rc)
+ return rc;
+
+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ p->u.memref.shm = shm;
+
+ /* Check that the memref is covered by the shm object */
+ if (p->u.memref.size) {
+ size_t o = p->u.memref.shm_offs +
+ p->u.memref.size - 1;
+
+ rc = tee_shm_get_pa(shm, o, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ p->u.memref.size = mp->u.rmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
+
+ if (shm) {
+ p->u.memref.shm_offs = mp->u.rmem.offs;
+ p->u.memref.shm = shm;
+ } else {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ }
+}
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ rc = from_msg_param_tmp_mem(p, attr, mp);
+ if (rc)
+ return rc;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ from_msg_param_reg_mem(p, attr, mp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ int rc;
+ phys_addr_t pa;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+ return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.rmem.size = p->u.memref.size;
+ mp->u.rmem.offs = p->u.memref.shm_offs;
+ return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params:	subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (tee_shm_is_registered(p->u.memref.shm))
+ rc = to_msg_param_reg_mem(mp, p);
+ else
+ rc = to_msg_param_tmp_mem(mp, p);
+ if (rc)
+ return rc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * 2. Low level support functions to register shared memory in secure world
+ *
+ * Functions to enable/disable shared memory caching in secure world, that
+ * is, lazy freeing of previously allocated shared memory. Freeing is
+ * performed when a request has been completed.
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocation
+ * in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_enable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == OPTEE_SMC_RETURN_OK)
+ break;
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ * allocation in OP-TEE
+ * @optee: main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ * kernel, are safe to dereference, and should be freed
+ */
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_disable_shm_cache_result result;
+ } res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+ break; /* All shm's freed */
+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
+ struct tee_shm *shm;
+
+ /*
+ * Shared memory references that were not mapped by
+ * this kernel must be ignored to prevent a crash.
+ */
+ if (!is_mapped)
+ continue;
+
+ shm = reg_pair_to_ptr(res.result.shm_upper32,
+ res.result.shm_lower32);
+ tee_shm_free(shm);
+ } else {
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ * allocations in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_disable_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ * allocations in OP-TEE which are not
+ * currently mapped
+ * @optee: main service struct
+ */
+static void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, false);
+}
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+static u64 *optee_allocate_pages_list(size_t num_entries)
+{
+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+static void optee_free_pages_list(void *list, size_t num_entries)
+{
+ free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold list of user page addresses and
+ * links to the next pages of buffer
+ */
+static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset)
+{
+ int n = 0;
+ phys_addr_t optee_page;
+ /*
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+ * for details.
+ */
+ struct {
+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+ u64 next_page_data;
+ } *pages_data;
+
+ /*
+	 * Currently OP-TEE uses a 4k page size and it does not look
+	 * like this will change in the future. On the other hand, there
+	 * are no known ARM architectures with a page size < 4k. Thus the
+	 * build assert below looks redundant, but the following code
+	 * heavily relies on this assumption, so it is better to be safe
+	 * than sorry.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ pages_data = (void *)dst;
+ /*
+	 * If the Linux page size is bigger than 4k and the user buffer
+	 * offset crosses one or more 4k boundaries, this skips the leading
+	 * 4k sub-pages, because they carry no data of interest for OP-TEE.
+ */
+ optee_page = page_to_phys(*pages) +
+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ while (true) {
+ pages_data->pages_list[n++] = optee_page;
+
+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
+ pages_data->next_page_data =
+ virt_to_phys(pages_data + 1);
+ pages_data++;
+ n = 0;
+ }
+
+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+ if (!(optee_page & ~PAGE_MASK)) {
+ if (!--num_pages)
+ break;
+ pages++;
+ optee_page = page_to_phys(*pages);
+ }
+ }
+}
+
+static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ u64 *pages_list;
+ int rc;
+
+ if (!num_pages)
+ return -EINVAL;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ pages_list = optee_allocate_pages_list(num_pages);
+ if (!pages_list)
+ return -ENOMEM;
+
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ if (IS_ERR(shm_arg)) {
+ rc = PTR_ERR(shm_arg);
+ goto out;
+ }
+
+ optee_fill_pages_list(pages_list, pages, num_pages,
+ tee_shm_get_page_offset(shm));
+
+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+ /*
+	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
+	 * we store the buffer offset from the 4k page, as described in the
+	 * OP-TEE ABI.
+ */
+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+ tee_shm_free(shm_arg);
+out:
+ optee_free_pages_list(pages_list, num_pages);
+ return rc;
+}
+
+static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ int rc = 0;
+
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ if (IS_ERR(shm_arg))
+ return PTR_ERR(shm_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+ tee_shm_free(shm_arg);
+ return rc;
+}
+
+static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ /*
+ * We don't want to register supplicant memory in OP-TEE.
+ * Instead information about it will be passed in RPC code.
+ */
+ return optee_check_mem_type(start, num_pages);
+}
+
+static int optee_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ return 0;
+}
+
+/*
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool which is used
+ * when dynamic shared memory is supported by secure world.
+ *
+ * The main function is optee_shm_pool_alloc_pages().
+ */
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ /*
+ * Shared memory private to the OP-TEE driver doesn't need
+ * to be registered with OP-TEE.
+ */
+ if (shm->flags & TEE_SHM_PRIV)
+ return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
+
+ return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ if (!(shm->flags & TEE_SHM_PRIV))
+ optee_shm_unregister(shm->ctx, shm);
+
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from the kernel's own memory.
+ */
+static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+/*
+ * 4. Do a normal scheduled call into secure world
+ *
+ * The function optee_smc_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call secure world may request help
+ * from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling
+ * of the current task.
+ */
+
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+	default:
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+{
+ phys_addr_t pa;
+ struct tee_shm *shm;
+ size_t sz;
+ size_t n;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (!arg->num_params ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ for (n = 1; n < arg->num_params; n++) {
+ if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ }
+
+ sz = arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
+ sz = tee_shm_get_size(shm);
+
+ if (tee_shm_is_registered(shm)) {
+ struct page **pages;
+ u64 *pages_list;
+ size_t page_num;
+
+ pages = tee_shm_get_pages(shm, &page_num);
+ if (!pages || !page_num) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ pages_list = optee_allocate_pages_list(page_num);
+ if (!pages_list) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ call_ctx->pages_list = pages_list;
+ call_ctx->num_entries = page_num;
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ /*
+		 * In the least significant bits of u.tmem.buf_ptr we store the
+		 * buffer offset from the 4k page, as described in the OP-TEE ABI.
+ */
+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) &
+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+ optee_fill_pages_list(pages_list, pages, page_num,
+ tee_shm_get_page_offset(shm));
+ } else {
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ tee_shm_free(shm);
+}
+
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+ if (call_ctx->pages_list) {
+ optee_free_pages_list(call_ctx->pages_list,
+ call_ctx->num_entries);
+ call_ctx->pages_list = NULL;
+ call_ctx->num_entries = 0;
+ }
+}
+
+static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+ free_pages_list(call_ctx);
+}
+
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+ struct tee_shm *shm,
+ struct optee_call_ctx *call_ctx)
+{
+ struct optee_msg_arg *arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg)) {
+ pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
+ return;
+ }
+
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ free_pages_list(call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_rpc_func_cmd_shm_free(ctx, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx: context doing the RPC
+ * @param: value of registers for the RPC
+ * @call_ctx: call context. Preserved during one OP-TEE invocation
+ *
+ * Result of RPC is written back into @param.
+ */
+static void optee_handle_rpc(struct tee_context *ctx,
+ struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+ case OPTEE_SMC_RPC_FUNC_ALLOC:
+ shm = tee_shm_alloc(ctx, param->a1,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+ reg_pair_from_64(&param->a1, &param->a2, pa);
+ reg_pair_from_64(&param->a4, &param->a5,
+ (unsigned long)shm);
+ } else {
+ param->a1 = 0;
+ param->a2 = 0;
+ param->a4 = 0;
+ param->a5 = 0;
+ }
+ break;
+ case OPTEE_SMC_RPC_FUNC_FREE:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ tee_shm_free(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+ /*
+		 * A foreign interrupt was raised while secure world was
+		 * executing. Since such interrupts are handled in Linux, a
+		 * dummy RPC is performed to let Linux take the interrupt
+		 * through the normal vector.
+ */
+ break;
+ case OPTEE_SMC_RPC_FUNC_CMD:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n",
+ (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+ break;
+ }
+
+ param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
+
+/**
+ * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx: calling context
+ * @arg: shared memory holding the message to pass to secure world
+ *
+ * Does an SMC to OP-TEE in secure world and handles any resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from secure world, 0 is OK
+ */
+static int optee_smc_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_call_waiter w;
+ struct optee_rpc_param param = { };
+ struct optee_call_ctx call_ctx = { };
+ phys_addr_t parg;
+ int rc;
+
+ rc = tee_shm_get_pa(arg, 0, &parg);
+ if (rc)
+ return rc;
+
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ trace_optee_invoke_fn_begin(&param);
+ optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ param.a4, param.a5, param.a6, param.a7,
+ &res);
+ trace_optee_invoke_fn_end(&param, &res);
+
+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ /*
+			 * Out of threads in secure world, wait for a thread
+			 * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+ cond_resched();
+ param.a0 = res.a0;
+ param.a1 = res.a1;
+ param.a2 = res.a2;
+ param.a3 = res.a3;
+ optee_handle_rpc(ctx, &param, &call_ctx);
+ } else {
+ rc = res.a0;
+ break;
+ }
+ }
+
+ optee_rpc_finalize_call(&call_ctx);
+ /*
+	 * We're done with our thread in secure world; if there are any
+	 * thread waiters, wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+/*
+ * 5. Driver initialization
+ *
+ * During driver initialization the secure world is probed to find out which
+ * features it supports so the driver can be initialized with a matching
+ * configuration. This involves, for instance, support for dynamic shared
+ * memory instead of a static memory carveout.
+ */
+
+static void optee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
+ v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
+ *vers = v;
+}
+
+static int optee_smc_open(struct tee_context *ctx)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ u32 sec_caps = optee->smc.sec_caps;
+
+ return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
+}
+
+static const struct tee_driver_ops optee_clnt_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_shm_register,
+ .shm_unregister = optee_shm_unregister,
+};
+
+static const struct tee_desc optee_clnt_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &optee_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_supp_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_shm_register_supp,
+ .shm_unregister = optee_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_supp_desc = {
+ .name = DRIVER_NAME "-supp",
+ .ops = &optee_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ops = {
+ .do_call_with_arg = optee_smc_do_call_with_arg,
+ .to_msg_param = optee_to_msg_param,
+ .from_msg_param = optee_from_msg_param,
+};
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+ return true;
+ return false;
+}
+
+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_call_get_os_revision_result result;
+ } res = {
+ .result = {
+ .build_id = 0
+ }
+ };
+
+ invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.build_id)
+ pr_info("revision %lu.%lu (%08lx)", res.result.major,
+ res.result.minor, res.result.build_id);
+ else
+ pr_info("revision %lu.%lu", res.result.major, res.result.minor);
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_calls_revision_result result;
+ } res;
+
+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+ return true;
+ return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+ u32 *sec_caps)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_exchange_capabilities_result result;
+ } res;
+ u32 a1 = 0;
+
+ /*
+	 * TODO: This isn't enough to tell whether it's a UP system (from
+	 * the kernel's point of view) or not; is_smp() returns the
+	 * information needed, but can't be called directly from here.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return false;
+
+ *sec_caps = res.result.capabilities;
+ return true;
+}
+
+static struct tee_shm_pool *optee_config_dyn_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_shm_config_result result;
+ } res;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+ const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
+ pr_err("static shm service not available\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+ pr_err("only normal cached shared memory supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ begin = roundup(res.result.start, PAGE_SIZE);
+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+
+ if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
+ pr_err("too small shared memory area\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ va = memremap(paddr, size, MEMREMAP_WB);
+ if (!va) {
+ pr_err("shared memory ioremap failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+ 3 /* 8 bytes aligned */);
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+
+ vaddr += sz;
+ paddr += sz;
+ size -= sz;
+
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+ if (IS_ERR(rc))
+ goto err_free_priv_mgr;
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc))
+ goto err_free_dmabuf_mgr;
+
+ *memremaped_shm = va;
+
+ return rc;
+
+err_free_dmabuf_mgr:
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+ tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+ memunmap(va);
+ return rc;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device *dev)
+{
+ const char *method;
+
+ pr_info("probing for conduit method.\n");
+
+ if (device_property_read_string(dev, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp("hvc", method))
+ return optee_smccc_hvc;
+ else if (!strcmp("smc", method))
+ return optee_smccc_smc;
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return ERR_PTR(-EINVAL);
+}
+
+/* optee_smc_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_smc_remove is called by the platform subsystem to alert the driver
+ * that it should release the device.
+ */
+static int optee_smc_remove(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ optee_disable_shm_cache(optee);
+
+ optee_remove_common(optee);
+
+ if (optee->smc.memremaped_shm)
+ memunmap(optee->smc.memremaped_shm);
+
+ kfree(optee);
+
+ return 0;
+}
+
+/* optee_shutdown - Device Shutdown Routine
+ * @pdev: platform device information struct
+ *
+ * optee_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * the device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+ optee_disable_shm_cache(platform_get_drvdata(pdev));
+}
+
+static int optee_probe(struct platform_device *pdev)
+{
+ optee_invoke_fn *invoke_fn;
+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ struct tee_device *teedev;
+ u32 sec_caps;
+ int rc;
+
+ invoke_fn = get_invoke_func(&pdev->dev);
+ if (IS_ERR(invoke_fn))
+ return PTR_ERR(invoke_fn);
+
+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+ pr_warn("api uid mismatch\n");
+ return -EINVAL;
+ }
+
+ optee_msg_get_os_revision(invoke_fn);
+
+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+ pr_warn("api revision mismatch\n");
+ return -EINVAL;
+ }
+
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+ pr_warn("capabilities mismatch\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Try to use dynamic shared memory if possible
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pool = optee_config_dyn_shm();
+
+ /*
+	 * If dynamic shared memory is not available or failed, try the static one
+ */
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ optee->ops = &optee_ops;
+ optee->smc.invoke_fn = invoke_fn;
+ optee->smc.sec_caps = sec_caps;
+
+ teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err;
+
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_wait_queue_init(&optee->wait_queue);
+ optee_supp_init(&optee->supp);
+ optee->smc.memremaped_shm = memremaped_shm;
+ optee->pool = pool;
+
+ /*
+ * Ensure that there are no pre-existing shm objects before enabling
+ * the shm cache so that there's no chance of receiving an invalid
+ * address during shutdown. This could occur, for example, if we're
+ * kexec booting from an older kernel that did not properly cleanup the
+ * shm cache.
+ */
+ optee_disable_unmapped_shm_cache(optee);
+
+ optee_enable_shm_cache(optee);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pr_info("dynamic shared memory is enabled\n");
+
+ platform_set_drvdata(pdev, optee);
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc) {
+ optee_smc_remove(pdev);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+ return 0;
+err:
+ if (optee) {
+ /*
+		 * tee_device_unregister() is safe to call even if the
+		 * devices haven't been registered with
+		 * tee_device_register() yet.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+ kfree(optee);
+ }
+ if (pool)
+ tee_shm_pool_free(pool);
+ if (memremaped_shm)
+ memunmap(memremaped_shm);
+ return rc;
+}
+
+static const struct of_device_id optee_dt_match[] = {
+ { .compatible = "linaro,optee-tz" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, optee_dt_match);
+
+static struct platform_driver optee_driver = {
+ .probe = optee_probe,
+ .remove = optee_smc_remove,
+ .shutdown = optee_shutdown,
+ .driver = {
+ .name = "optee",
+ .of_match_table = optee_dt_match,
+ },
+};
+
+int optee_smc_abi_register(void)
+{
+ return platform_driver_register(&optee_driver);
+}
+
+void optee_smc_abi_unregister(void)
+{
+ platform_driver_unregister(&optee_driver);
+}
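
The RPC dispatch in optee_handle_rpc() above passes physical addresses and struct tee_shm pointers to and from secure world split across pairs of SMC registers via reg_pair_from_64()/reg_pair_to_ptr(), defined elsewhere in the driver. Judging from the shm_upper32/shm_lower32 fields used in __optee_disable_shm_cache(), the first register carries the upper 32 bits and the second the lower 32 bits. A minimal standalone sketch of that packing convention, with illustrative helper names rather than the driver's exact definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a 64-bit value into two 32-bit "registers": upper half first,
 * lower half second, mirroring the convention implied above.
 */
static void pair_from_64(uint32_t *reg0, uint32_t *reg1, uint64_t val)
{
	*reg0 = (uint32_t)(val >> 32);
	*reg1 = (uint32_t)val;
}

/* Recombine the two halves into the original 64-bit value. */
static uint64_t pair_to_64(uint32_t reg0, uint32_t reg1)
{
	return ((uint64_t)reg0 << 32) | reg1;
}

int main(void)
{
	uint64_t pa = 0x00000008c0001000ULL;	/* example buffer address */
	uint32_t a1, a2;

	pair_from_64(&a1, &a2, pa);
	assert(pair_to_64(a1, a2) == pa);
	printf("a1=0x%08x a2=0x%08x -> 0x%016llx\n",
	       a1, a2, (unsigned long long)pair_to_64(a1, a2));
	return 0;
}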
diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h
deleted file mode 100644
index ec336d31dee4..000000000000
--- a/include/dt-bindings/power/qcom-aoss-qmp.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Linaro Ltd. */
-
-#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-
-#define AOSS_QMP_LS_CDSP 0
-#define AOSS_QMP_LS_LPASS 1
-#define AOSS_QMP_LS_MODEM 2
-#define AOSS_QMP_LS_SLPI 3
-#define AOSS_QMP_LS_SPSS 4
-#define AOSS_QMP_LS_VENUS 5
-
-#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 4533dbbf9937..960f7976a807 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -20,6 +20,14 @@
#define SDX55_MX 1
#define SDX55_CX 2
+/* SM6350 Power Domain Indexes */
+#define SM6350_CX 0
+#define SM6350_GFX 1
+#define SM6350_LCX 2
+#define SM6350_LMX 3
+#define SM6350_MSS 4
+#define SM6350_MX 5
+
/* SM8150 Power Domain Indexes */
#define SM8150_MSS 0
#define SM8150_EBI 1
@@ -133,6 +141,15 @@
#define MSM8916_VDDMX 3
#define MSM8916_VDDMX_AO 4
+/* MSM8953 Power Domain Indexes */
+#define MSM8953_VDDMD 0
+#define MSM8953_VDDMD_AO 1
+#define MSM8953_VDDCX 2
+#define MSM8953_VDDCX_AO 3
+#define MSM8953_VDDCX_VFL 4
+#define MSM8953_VDDMX 5
+#define MSM8953_VDDMX_AO 6
+
/* MSM8976 Power Domain Indexes */
#define MSM8976_VDDCX 0
#define MSM8976_VDDCX_AO 1
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 505c679b6a9b..85651e41ded8 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -262,6 +262,8 @@ struct ffa_dev_ops {
int (*memory_reclaim)(u64 g_handle, u32 flags);
int (*memory_share)(struct ffa_device *dev,
struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_device *dev,
+ struct ffa_mem_ops_args *args);
};
#endif /* _LINUX_ARM_FFA_H */
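
The arm_ffa.h hunk adds a memory_lend callback next to memory_share in struct ffa_dev_ops. A hedged sketch of how an FF-A client driver might pick between the two operations follows; the ffa_dev_ops_get() lookup and the NULL handling follow the existing FF-A client pattern and are assumptions here, and populating struct ffa_mem_ops_args (scatterlist, attributes, tag, ...) is left out.

#include <linux/arm_ffa.h>
#include <linux/errno.h>

/*
 * Illustrative only: send the memory region described by @args to the
 * partition behind @dev, lending it when requested and the operation is
 * available, otherwise sharing it.
 */
static int example_ffa_send_memory(struct ffa_device *dev,
				   struct ffa_mem_ops_args *args,
				   bool lend)
{
	const struct ffa_dev_ops *ops = ffa_dev_ops_get(dev);

	if (!ops)
		return -ENODEV;

	if (lend && ops->memory_lend)
		return ops->memory_lend(dev, args);

	return ops->memory_share(dev, args);
}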
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index d128ad1570aa..3650e926e93f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -42,6 +42,7 @@ struct tegra_cpu_car_ops {
#endif
};
+#ifdef CONFIG_ARCH_TEGRA
extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
static inline void tegra_wait_cpu_in_reset(u32 cpu)
@@ -83,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu)
tegra_cpu_car_ops->disable_clock(cpu);
}
+#else
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+}
-#ifdef CONFIG_PM_SLEEP
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP)
static inline bool tegra_cpu_rail_off_ready(void)
{
if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 9837fb011f2f..eb556f988d57 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -50,6 +50,9 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_OTG BIT(30)
+#define SYSC_QUIRK_RESET_ON_CTX_LOST BIT(29)
+#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28)
#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index 2228bf6133da..4bba275e235a 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -29,13 +29,16 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_OVL_2L1,
+ DDP_COMPONENT_OVL_2L2,
DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_RDMA4,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_WDMA0,
DDP_COMPONENT_WDMA1,
diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h
new file mode 100644
index 000000000000..3c2a82e606f8
--- /dev/null
+++ b/include/linux/soc/qcom/qcom_aoss.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_AOSS_H__
+#define __QCOM_AOSS_H__
+
+#include <linux/err.h>
+#include <linux/device.h>
+
+struct qmp;
+
+#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP)
+
+int qmp_send(struct qmp *qmp, const void *data, size_t len);
+struct qmp *qmp_get(struct device *dev);
+void qmp_put(struct qmp *qmp);
+
+#else
+
+static inline int qmp_send(struct qmp *qmp, const void *data, size_t len)
+{
+ return -ENODEV;
+}
+
+static inline struct qmp *qmp_get(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void qmp_put(struct qmp *qmp)
+{
+}
+
+#endif
+
+#endif
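
The new qcom_aoss.h header gives kernel consumers a small API (qmp_get(), qmp_send(), qmp_put()) with stubs when CONFIG_QCOM_AOSS_QMP is disabled. A hedged consumer sketch follows; the driver structure and the message payload are made up for illustration, and real QMP messages are AOSS-specific strings (the padded buffer below only keeps the example length well-formed).

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/qcom_aoss.h>

struct example_aoss_client {
	struct qmp *qmp;
};

/* Hypothetical consumer: take a QMP handle at probe time, send one
 * example message, and release the handle on remove.
 */
static int example_aoss_probe(struct platform_device *pdev)
{
	static const char msg[32] = "{class: example, res: on}";
	struct example_aoss_client *client;
	int ret;

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->qmp = qmp_get(&pdev->dev);
	if (IS_ERR(client->qmp))
		return PTR_ERR(client->qmp);

	ret = qmp_send(client->qmp, msg, sizeof(msg));
	if (ret) {
		qmp_put(client->qmp);
		return ret;
	}

	platform_set_drvdata(pdev, client);
	return 0;
}

static int example_aoss_remove(struct platform_device *pdev)
{
	struct example_aoss_client *client = platform_get_drvdata(pdev);

	qmp_put(client->qmp);
	return 0;
}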
diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h
index 8bca6763f99c..62f0e2531068 100644
--- a/include/linux/soc/samsung/exynos-chipid.h
+++ b/include/linux/soc/samsung/exynos-chipid.h
@@ -9,10 +9,8 @@
#define __LINUX_SOC_EXYNOS_CHIPID_H
#define EXYNOS_CHIPID_REG_PRO_ID 0x00
-#define EXYNOS_SUBREV_MASK (0xf << 4)
-#define EXYNOS_MAINREV_MASK (0xf << 0)
-#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \
- EXYNOS_MAINREV_MASK)
+#define EXYNOS_REV_PART_MASK 0xf
+#define EXYNOS_REV_PART_SHIFT 4
#define EXYNOS_MASK 0xfffff000
#define EXYNOS_CHIPID_REG_PKG_ID 0x04
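
The exynos-chipid.h hunk replaces the combined EXYNOS_REV_MASK with a part mask and shift so the main and sub revision nibbles of the PRO_ID register can be extracted independently. Going by the removed definitions (main revision in bits [3:0], sub revision in bits [7:4]), a hedged helper sketch looks like this; reading the register itself is assumed to happen elsewhere.

#include <linux/soc/samsung/exynos-chipid.h>
#include <linux/types.h>

/* Split a PRO_ID register value using the new macros; the nibble
 * layout is inferred from the removed SUBREV/MAINREV masks.
 */
static inline u32 example_exynos_main_rev(u32 pro_id)
{
	return pro_id & EXYNOS_REV_PART_MASK;
}

static inline u32 example_exynos_sub_rev(u32 pro_id)
{
	return (pro_id >> EXYNOS_REV_PART_SHIFT) & EXYNOS_REV_PART_MASK;
}

static inline u32 example_exynos_product_id(u32 pro_id)
{
	return pro_id & EXYNOS_MASK;
}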
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 3ebfea0781f1..a1f03461369b 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -197,7 +197,11 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
* @num_pages: number of locked pages
* @dmabuf: dmabuf used to for exporting to user space
* @flags: defined by TEE_SHM_* in tee_drv.h
- * @id: unique id of a shared memory object on this device
+ * @id: unique id of a shared memory object on this device, shared
+ * with user space
+ * @sec_world_id:
+ * secure world assigned id of this shared memory object, not
+ * used by all drivers
*
* This pool is only supposed to be accessed directly from the TEE
* subsystem and from drivers that implements their own shm pool manager.
@@ -213,6 +217,7 @@ struct tee_shm {
struct dma_buf *dmabuf;
u32 flags;
int id;
+ u64 sec_world_id;
};
/**
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index e3e770f76f34..77c694a19149 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -59,6 +59,7 @@ struct rpcif_op {
struct rpcif {
struct device *dev;
+ void __iomem *base;
void __iomem *dirmap;
struct regmap *regmap;
struct reset_control *rstc;
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 000000000000..4951f9d8b0bd
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ */
+
+#ifndef __SPM_H__
+#define __SPM_H__
+
+#include <linux/cpuidle.h>
+
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+
+enum pm_sleep_mode {
+ PM_SLEEP_MODE_STBY,
+ PM_SLEEP_MODE_RET,
+ PM_SLEEP_MODE_SPC,
+ PM_SLEEP_MODE_PC,
+ PM_SLEEP_MODE_NR,
+};
+
+struct spm_reg_data {
+ const u16 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u32 avs_ctl;
+ u32 avs_limit;
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+};
+
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode);
+
+#endif /* __SPM_H__ */
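
The new soc/qcom/spm.h header exports spm_set_low_power_mode() and the SPM register/sequence data so a cpuidle driver can program the power controller before the core idles. A hedged sketch of that usage pattern follows; obtaining the per-CPU struct spm_driver_data and the actual power-collapse entry are simplified, with cpu_do_idle() standing in for the real suspend path.

#include <linux/cpuidle.h>
#include <soc/qcom/spm.h>

/* Sketch of a cpuidle enter path: arm the SPM for standalone power
 * collapse, idle the core, then restore standby so a plain WFI from
 * generic code does not power the CPU down by accident.
 */
static int example_spm_enter_spc(struct spm_driver_data *drv_data)
{
	spm_set_low_power_mode(drv_data, PM_SLEEP_MODE_SPC);
	cpu_do_idle();		/* stand-in for the real power-collapse entry */
	spm_set_low_power_mode(drv_data, PM_SLEEP_MODE_STBY);

	return 0;
}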
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 990701f788bc..67d2bc856fbc 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -6,6 +6,8 @@
#ifndef __SOC_TEGRA_FUSE_H__
#define __SOC_TEGRA_FUSE_H__
+#include <linux/types.h>
+
#define TEGRA20 0x20
#define TEGRA30 0x30
#define TEGRA114 0x35
@@ -22,11 +24,6 @@
#ifndef __ASSEMBLY__
-u32 tegra_read_chipid(void);
-u8 tegra_get_chip_id(void);
-u8 tegra_get_platform(void);
-bool tegra_is_silicon(void);
-
enum tegra_revision {
TEGRA_REVISION_UNKNOWN = 0,
TEGRA_REVISION_A01,
@@ -57,6 +54,10 @@ extern struct tegra_sku_info tegra_sku_info;
u32 tegra_read_straps(void);
u32 tegra_read_ram_code(void);
int tegra_fuse_readl(unsigned long offset, u32 *value);
+u32 tegra_read_chipid(void);
+u8 tegra_get_chip_id(void);
+u8 tegra_get_platform(void);
+bool tegra_is_silicon(void);
#else
static struct tegra_sku_info tegra_sku_info __maybe_unused;
@@ -74,6 +75,26 @@ static inline int tegra_fuse_readl(unsigned long offset, u32 *value)
{
return -ENODEV;
}
+
+static inline u32 tegra_read_chipid(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_chip_id(void)
+{
+ return 0;
+}
+
+static inline u8 tegra_get_platform(void)
+{
+ return 0;
+}
+
+static inline bool tegra_is_silicon(void)
+{
+ return false;
+}
#endif
struct device *tegra_soc_device_register(void);
diff --git a/include/soc/tegra/irq.h b/include/soc/tegra/irq.h
index 8eb11a7109e4..94539551c8c1 100644
--- a/include/soc/tegra/irq.h
+++ b/include/soc/tegra/irq.h
@@ -6,8 +6,15 @@
#ifndef __SOC_TEGRA_IRQ_H
#define __SOC_TEGRA_IRQ_H
-#if defined(CONFIG_ARM)
+#include <linux/types.h>
+
+#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
bool tegra_pending_sgi(void);
+#else
+static inline bool tegra_pending_sgi(void)
+{
+ return false;
+}
#endif
#endif /* __SOC_TEGRA_IRQ_H */
diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h
index 433878927026..ce4d0b1bd0d6 100644
--- a/include/soc/tegra/pm.h
+++ b/include/soc/tegra/pm.h
@@ -17,7 +17,7 @@ enum tegra_suspend_mode {
TEGRA_SUSPEND_NOT_READY,
};
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA)
enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode);
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index 06c315a4d20a..2c8cd843d049 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -111,9 +111,9 @@ static void ep93xx_i2s_enable(struct ep93xx_i2s_info *info, int stream)
if ((ep93xx_i2s_read_reg(info, EP93XX_I2S_TX0EN) & 0x1) == 0 &&
(ep93xx_i2s_read_reg(info, EP93XX_I2S_RX0EN) & 0x1) == 0) {
/* Enable clocks */
- clk_enable(info->mclk);
- clk_enable(info->sclk);
- clk_enable(info->lrclk);
+ clk_prepare_enable(info->mclk);
+ clk_prepare_enable(info->sclk);
+ clk_prepare_enable(info->lrclk);
/* Enable i2s */
ep93xx_i2s_write_reg(info, EP93XX_I2S_GLCTRL, 1);
@@ -156,9 +156,9 @@ static void ep93xx_i2s_disable(struct ep93xx_i2s_info *info, int stream)
ep93xx_i2s_write_reg(info, EP93XX_I2S_GLCTRL, 0);
/* Disable clocks */
- clk_disable(info->lrclk);
- clk_disable(info->sclk);
- clk_disable(info->mclk);
+ clk_disable_unprepare(info->lrclk);
+ clk_disable_unprepare(info->sclk);
+ clk_disable_unprepare(info->mclk);
}
}
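
The ep93xx-i2s.c hunk switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), as required by the common clock framework, where a clock must be prepared (a step that may sleep) before it can be enabled. A hedged sketch of the pairing, with the error handling the call sites above drop, is shown below; the clock names are illustrative.

#include <linux/clk.h>

/* Enable two clocks with clk_prepare_enable(), undoing the first if
 * the second fails; clk_disable_unprepare() reverses both steps.
 */
static int example_enable_i2s_clks(struct clk *mclk, struct clk *sclk)
{
	int ret;

	ret = clk_prepare_enable(mclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sclk);
	if (ret) {
		clk_disable_unprepare(mclk);
		return ret;
	}

	return 0;
}

static void example_disable_i2s_clks(struct clk *mclk, struct clk *sclk)
{
	clk_disable_unprepare(sclk);
	clk_disable_unprepare(mclk);
}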