Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/Kconfig | 6
-rw-r--r--  drivers/soc/Makefile | 8
-rw-r--r--  drivers/soc/actions/Kconfig | 17
-rw-r--r--  drivers/soc/amlogic/Kconfig | 35
-rw-r--r--  drivers/soc/amlogic/meson-canvas.c | 12
-rw-r--r--  drivers/soc/amlogic/meson-clk-measure.c | 462
-rw-r--r--  drivers/soc/amlogic/meson-gx-socinfo.c | 18
-rw-r--r--  drivers/soc/apple/Kconfig | 23
-rw-r--r--  drivers/soc/apple/Makefile | 4
-rw-r--r--  drivers/soc/apple/mailbox.c | 463
-rw-r--r--  drivers/soc/apple/mailbox.h | 48
-rw-r--r--  drivers/soc/apple/rtkit-internal.h | 9
-rw-r--r--  drivers/soc/apple/rtkit.c | 248
-rw-r--r--  drivers/soc/apple/sart.c | 73
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-ctrl.c | 18
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-snoop.c | 229
-rw-r--r--  drivers/soc/aspeed/aspeed-p2a-ctrl.c | 18
-rw-r--r--  drivers/soc/aspeed/aspeed-socinfo.c | 4
-rw-r--r--  drivers/soc/aspeed/aspeed-uart-routing.c | 4
-rw-r--r--  drivers/soc/atmel/soc.c | 30
-rw-r--r--  drivers/soc/atmel/soc.h | 12
-rw-r--r--  drivers/soc/bcm/Kconfig | 53
-rw-r--r--  drivers/soc/bcm/brcmstb/biuctrl.c | 12
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/pm.h | 2
-rw-r--r--  drivers/soc/canaan/Kconfig | 4
-rw-r--r--  drivers/soc/cirrus/Kconfig | 17
-rw-r--r--  drivers/soc/cirrus/Makefile | 2
-rw-r--r--  drivers/soc/cirrus/soc-ep93xx.c | 252
-rw-r--r--  drivers/soc/dove/pmu.c | 12
-rw-r--r--  drivers/soc/fsl/Kconfig | 4
-rw-r--r--  drivers/soc/fsl/dpaa2-console.c | 4
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c | 4
-rw-r--r--  drivers/soc/fsl/qbman/Kconfig | 2
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c | 27
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.c | 12
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.h | 4
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 34
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 73
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c | 5
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_stash.c | 10
-rw-r--r--  drivers/soc/fsl/qe/Kconfig | 17
-rw-r--r--  drivers/soc/fsl/qe/gpio.c | 149
-rw-r--r--  drivers/soc/fsl/qe/qe.c | 2
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c | 84
-rw-r--r--  drivers/soc/fsl/qe/qe_ic.c | 24
-rw-r--r--  drivers/soc/fsl/qe/qmc.c | 1372
-rw-r--r--  drivers/soc/fsl/qe/tsa.c | 710
-rw-r--r--  drivers/soc/fsl/qe/tsa.h | 3
-rw-r--r--  drivers/soc/fsl/qe/ucc.c | 1
-rw-r--r--  drivers/soc/fsl/rcpm.c | 1
-rw-r--r--  drivers/soc/fujitsu/a64fx-diag.c | 4
-rw-r--r--  drivers/soc/hisilicon/Kconfig | 9
-rw-r--r--  drivers/soc/hisilicon/kunpeng_hccs.c | 682
-rw-r--r--  drivers/soc/hisilicon/kunpeng_hccs.h | 50
-rw-r--r--  drivers/soc/imx/Kconfig | 19
-rw-r--r--  drivers/soc/imx/Makefile | 2
-rw-r--r--  drivers/soc/imx/soc-imx8m.c | 284
-rw-r--r--  drivers/soc/imx/soc-imx9.c | 128
-rw-r--r--  drivers/soc/ixp4xx/ixp4xx-npe.c | 5
-rw-r--r--  drivers/soc/ixp4xx/ixp4xx-qmgr.c | 4
-rw-r--r--  drivers/soc/litex/Kconfig | 2
-rw-r--r--  drivers/soc/litex/litex_soc_ctrl.c | 28
-rw-r--r--  drivers/soc/loongson/Kconfig | 1
-rw-r--r--  drivers/soc/loongson/loongson2_guts.c | 15
-rw-r--r--  drivers/soc/loongson/loongson2_pm.c | 7
-rw-r--r--  drivers/soc/mediatek/Kconfig | 44
-rw-r--r--  drivers/soc/mediatek/Makefile | 2
-rw-r--r--  drivers/soc/mediatek/mt8167-mmsys.h | 31
-rw-r--r--  drivers/soc/mediatek/mt8173-mmsys.h | 99
-rw-r--r--  drivers/soc/mediatek/mt8183-mmsys.h | 50
-rw-r--r--  drivers/soc/mediatek/mt8186-mmsys.h | 88
-rw-r--r--  drivers/soc/mediatek/mt8188-mmsys.h | 330
-rw-r--r--  drivers/soc/mediatek/mt8192-mmsys.h | 71
-rw-r--r--  drivers/soc/mediatek/mt8195-mmsys.h | 632
-rw-r--r--  drivers/soc/mediatek/mt8365-mmsys.h | 84
-rw-r--r--  drivers/soc/mediatek/mtk-cmdq-helper.c | 386
-rw-r--r--  drivers/soc/mediatek/mtk-devapc.c | 23
-rw-r--r--  drivers/soc/mediatek/mtk-dvfsrc.c | 578
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.c | 44
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.h | 46
-rw-r--r--  drivers/soc/mediatek/mtk-mutex.c | 258
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c | 120
-rw-r--r--  drivers/soc/mediatek/mtk-regulator-coupler.c | 1
-rw-r--r--  drivers/soc/mediatek/mtk-socinfo.c | 212
-rw-r--r--  drivers/soc/mediatek/mtk-svs.c | 1672
-rw-r--r--  drivers/soc/microchip/Kconfig | 15
-rw-r--r--  drivers/soc/microchip/Makefile | 1
-rw-r--r--  drivers/soc/microchip/mpfs-control-scb.c | 38
-rw-r--r--  drivers/soc/microchip/mpfs-mss-top-sysreg.c | 44
-rw-r--r--  drivers/soc/microchip/mpfs-sys-controller.c | 37
-rw-r--r--  drivers/soc/pxa/ssp.c | 10
-rw-r--r--  drivers/soc/qcom/Kconfig | 88
-rw-r--r--  drivers/soc/qcom/Makefile | 8
-rw-r--r--  drivers/soc/qcom/apr.c | 15
-rw-r--r--  drivers/soc/qcom/cmd-db.c | 51
-rw-r--r--  drivers/soc/qcom/icc-bwmon.c | 37
-rw-r--r--  drivers/soc/qcom/ice.c | 454
-rw-r--r--  drivers/soc/qcom/kryo-l2-accessors.c | 4
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c | 4524
-rw-r--r--  drivers/soc/qcom/mdt_loader.c | 129
-rw-r--r--  drivers/soc/qcom/ocmem.c | 29
-rw-r--r--  drivers/soc/qcom/pdr_interface.c | 62
-rw-r--r--  drivers/soc/qcom/pdr_internal.h | 317
-rw-r--r--  drivers/soc/qcom/pmic_glink.c | 188
-rw-r--r--  drivers/soc/qcom/pmic_glink_altmode.c | 141
-rw-r--r--  drivers/soc/qcom/pmic_pdcharger_ulog.c | 170
-rw-r--r--  drivers/soc/qcom/pmic_pdcharger_ulog.h | 36
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c | 561
-rw-r--r--  drivers/soc/qcom/qcom-pbs.c | 228
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c | 128
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c | 10
-rw-r--r--  drivers/soc/qcom/qcom_pd_mapper.c | 731
-rw-r--r--  drivers/soc/qcom/qcom_pdr_msg.c | 352
-rw-r--r--  drivers/soc/qcom/qcom_stats.c | 141
-rw-r--r--  drivers/soc/qcom/qmi_encdec.c | 58
-rw-r--r--  drivers/soc/qcom/qmi_interface.c | 28
-rw-r--r--  drivers/soc/qcom/ramp_controller.c | 5
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c | 15
-rw-r--r--  drivers/soc/qcom/rpm-proc.c | 2
-rw-r--r--  drivers/soc/qcom/rpm_master_stats.c | 8
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c | 30
-rw-r--r--  drivers/soc/qcom/rpmh.c | 9
-rw-r--r--  drivers/soc/qcom/smd-rpm.c | 60
-rw-r--r--  drivers/soc/qcom/smem.c | 131
-rw-r--r--  drivers/soc/qcom/smem_state.c | 15
-rw-r--r--  drivers/soc/qcom/smp2p.c | 59
-rw-r--r--  drivers/soc/qcom/smsm.c | 61
-rw-r--r--  drivers/soc/qcom/socinfo.c | 175
-rw-r--r--  drivers/soc/qcom/spm.c | 249
-rw-r--r--  drivers/soc/qcom/trace-aoss.h | 48
-rw-r--r--  drivers/soc/qcom/trace-rpmh.h | 4
-rw-r--r--  drivers/soc/qcom/trace-smp2p.h | 98
-rw-r--r--  drivers/soc/qcom/trace_icc-bwmon.h | 48
-rw-r--r--  drivers/soc/qcom/ubwc_config.c | 317
-rw-r--r--  drivers/soc/qcom/wcnss_ctrl.c | 14
-rw-r--r--  drivers/soc/renesas/Kconfig | 507
-rw-r--r--  drivers/soc/renesas/Makefile | 5
-rw-r--r--  drivers/soc/renesas/pwc-rzv2m.c | 6
-rw-r--r--  drivers/soc/renesas/r9a08g045-sysc.c | 93
-rw-r--r--  drivers/soc/renesas/r9a09g047-sys.c | 147
-rw-r--r--  drivers/soc/renesas/r9a09g056-sys.c | 144
-rw-r--r--  drivers/soc/renesas/r9a09g057-sys.c | 169
-rw-r--r--  drivers/soc/renesas/rcar-rst.c | 4
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 32
-rw-r--r--  drivers/soc/renesas/rz-sysc.c | 169
-rw-r--r--  drivers/soc/renesas/rz-sysc.h | 53
-rw-r--r--  drivers/soc/rockchip/Kconfig | 12
-rw-r--r--  drivers/soc/rockchip/grf.c | 74
-rw-r--r--  drivers/soc/rockchip/io-domain.c | 52
-rw-r--r--  drivers/soc/samsung/Kconfig | 5
-rw-r--r--  drivers/soc/samsung/Makefile | 3
-rw-r--r--  drivers/soc/samsung/exynos-asv.c | 11
-rw-r--r--  drivers/soc/samsung/exynos-chipid.c | 35
-rw-r--r--  drivers/soc/samsung/exynos-pmu.c | 423
-rw-r--r--  drivers/soc/samsung/exynos-pmu.h | 39
-rw-r--r--  drivers/soc/samsung/exynos-usi.c | 108
-rw-r--r--  drivers/soc/samsung/exynos3250-pmu.c | 1
-rw-r--r--  drivers/soc/samsung/exynos5250-pmu.c | 1
-rw-r--r--  drivers/soc/samsung/exynos5420-pmu.c | 1
-rw-r--r--  drivers/soc/samsung/gs101-pmu.c | 446
-rw-r--r--  drivers/soc/sifive/Kconfig | 10
-rw-r--r--  drivers/soc/sifive/Makefile | 3
-rw-r--r--  drivers/soc/sifive/sifive_ccache.c | 272
-rw-r--r--  drivers/soc/sophgo/Kconfig | 34
-rw-r--r--  drivers/soc/sophgo/Makefile | 4
-rw-r--r--  drivers/soc/sophgo/cv1800-rtcsys.c | 63
-rw-r--r--  drivers/soc/sophgo/sg2044-topsys.c | 45
-rw-r--r--  drivers/soc/starfive/Kconfig | 12
-rw-r--r--  drivers/soc/sunxi/Kconfig | 9
-rw-r--r--  drivers/soc/sunxi/sunxi_sram.c | 40
-rw-r--r--  drivers/soc/tegra/Kconfig | 28
-rw-r--r--  drivers/soc/tegra/cbb/tegra-cbb.c | 20
-rw-r--r--  drivers/soc/tegra/cbb/tegra194-cbb.c | 40
-rw-r--r--  drivers/soc/tegra/cbb/tegra234-cbb.c | 760
-rw-r--r--  drivers/soc/tegra/common.c | 12
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c | 116
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra30.c | 162
-rw-r--r--  drivers/soc/tegra/fuse/fuse.h | 8
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra210.c | 63
-rw-r--r--  drivers/soc/tegra/fuse/tegra-apbmisc.c | 109
-rw-r--r--  drivers/soc/tegra/pmc.c | 350
-rw-r--r--  drivers/soc/ti/Kconfig | 12
-rw-r--r--  drivers/soc/ti/k3-ringacc.c | 17
-rw-r--r--  drivers/soc/ti/k3-socinfo.c | 103
-rw-r--r--  drivers/soc/ti/knav_dma.c | 42
-rw-r--r--  drivers/soc/ti/knav_qmss.h | 2
-rw-r--r--  drivers/soc/ti/knav_qmss_acc.c | 2
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c | 127
-rw-r--r--  drivers/soc/ti/pm33xx.c | 61
-rw-r--r--  drivers/soc/ti/pruss.c | 182
-rw-r--r--  drivers/soc/ti/smartreflex.c | 7
-rw-r--r--  drivers/soc/ti/ti_sci_inta_msi.c | 10
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c | 19
-rw-r--r--  drivers/soc/versatile/Kconfig | 4
-rw-r--r--  drivers/soc/versatile/soc-integrator.c | 1
-rw-r--r--  drivers/soc/versatile/soc-realview.c | 20
-rw-r--r--  drivers/soc/vt8500/Kconfig | 19
-rw-r--r--  drivers/soc/vt8500/Makefile | 2
-rw-r--r--  drivers/soc/vt8500/wmt-socinfo.c | 125
-rw-r--r--  drivers/soc/xilinx/Kconfig | 9
-rw-r--r--  drivers/soc/xilinx/xlnx_event_manager.c | 56
-rw-r--r--  drivers/soc/xilinx/zynqmp_power.c | 173
202 files changed, 20638 insertions, 5686 deletions
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index d21e75d69294..a2d65adffb80 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "SOC (System On Chip) specific Drivers"
-source "drivers/soc/actions/Kconfig"
source "drivers/soc/amlogic/Kconfig"
source "drivers/soc/apple/Kconfig"
source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/canaan/Kconfig"
+source "drivers/soc/cirrus/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/fujitsu/Kconfig"
source "drivers/soc/hisilicon/Kconfig"
@@ -23,13 +23,13 @@ source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/samsung/Kconfig"
-source "drivers/soc/sifive/Kconfig"
-source "drivers/soc/starfive/Kconfig"
+source "drivers/soc/sophgo/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
+source "drivers/soc/vt8500/Kconfig"
source "drivers/soc/xilinx/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 0706a27d13be..c9e689080ceb 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -7,7 +7,8 @@ obj-y += apple/
obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
-obj-$(CONFIG_SOC_CANAAN) += canaan/
+obj-$(CONFIG_ARCH_CANAAN) += canaan/
+obj-$(CONFIG_EP93XX_SOC) += cirrus/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
@@ -28,10 +29,11 @@ obj-y += qcom/
obj-y += renesas/
obj-y += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
-obj-y += sifive/
+obj-y += sophgo/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
-obj-$(CONFIG_PLAT_VERSATILE) += versatile/
+obj-y += versatile/
+obj-y += vt8500/
obj-y += xilinx/
diff --git a/drivers/soc/actions/Kconfig b/drivers/soc/actions/Kconfig
deleted file mode 100644
index 1aca2058a40c..000000000000
--- a/drivers/soc/actions/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-if ARCH_ACTIONS || COMPILE_TEST
-
-config OWL_PM_DOMAINS_HELPER
- bool
-
-config OWL_PM_DOMAINS
- bool "Actions Semi SPS power domains"
- depends on PM
- select OWL_PM_DOMAINS_HELPER
- select PM_GENERIC_DOMAINS
- help
- Say 'y' here to enable support for Smart Power System (SPS)
- power-gating on Actions Semiconductor S500, S700 and S900 SoCs.
- If unsure, say 'n'.
-
-endif
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index 174a9b011461..d08e398bdad4 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -26,41 +26,6 @@ config MESON_GX_SOCINFO
Say yes to support decoding of Amlogic Meson GX SoC family
information about the type, package and version.
-config MESON_GX_PM_DOMAINS
- tristate "Amlogic Meson GX Power Domains driver"
- depends on ARCH_MESON || COMPILE_TEST
- depends on PM && OF
- default ARCH_MESON
- select PM_GENERIC_DOMAINS
- select PM_GENERIC_DOMAINS_OF
- help
- Say yes to expose Amlogic Meson GX Power Domains as
- Generic Power Domains.
-
-config MESON_EE_PM_DOMAINS
- tristate "Amlogic Meson Everything-Else Power Domains driver"
- depends on ARCH_MESON || COMPILE_TEST
- depends on PM && OF
- default ARCH_MESON
- select PM_GENERIC_DOMAINS
- select PM_GENERIC_DOMAINS_OF
- help
- Say yes to expose Amlogic Meson Everything-Else Power Domains as
- Generic Power Domains.
-
-config MESON_SECURE_PM_DOMAINS
- tristate "Amlogic Meson Secure Power Domains driver"
- depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
- depends on PM && OF
- depends on HAVE_ARM_SMCCC
- default ARCH_MESON
- select PM_GENERIC_DOMAINS
- select PM_GENERIC_DOMAINS_OF
- help
- Support for the power controller on Amlogic A1/C1 series.
- Say yes to expose Amlogic Meson Secure Power Domains as Generic
- Power Domains.
-
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
depends on (ARM && ARCH_MESON) || COMPILE_TEST
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index b6e06c4d2117..79681afea8c6 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -60,12 +60,9 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
return ERR_PTR(-ENODEV);
canvas_pdev = of_find_device_by_node(canvas_node);
- if (!canvas_pdev) {
- of_node_put(canvas_node);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
of_node_put(canvas_node);
+ if (!canvas_pdev)
+ return ERR_PTR(-EPROBE_DEFER);
/*
* If priv is NULL, it's probably because the canvas hasn't
@@ -73,10 +70,9 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
* current state, this driver probe cannot return -EPROBE_DEFER
*/
canvas = dev_get_drvdata(&canvas_pdev->dev);
- if (!canvas) {
- put_device(&canvas_pdev->dev);
+ put_device(&canvas_pdev->dev);
+ if (!canvas)
return ERR_PTR(-EINVAL);
- }
return canvas;
}
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 3f3039600357..d862e30a244e 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -14,11 +14,6 @@
static DEFINE_MUTEX(measure_lock);
-#define MSR_CLK_DUTY 0x0
-#define MSR_CLK_REG0 0x4
-#define MSR_CLK_REG1 0x8
-#define MSR_CLK_REG2 0xc
-
#define MSR_DURATION GENMASK(15, 0)
#define MSR_ENABLE BIT(16)
#define MSR_CONT BIT(17) /* continuous measurement */
@@ -33,23 +28,34 @@ static DEFINE_MUTEX(measure_lock);
#define DIV_STEP 32
#define DIV_MAX 640
-#define CLK_MSR_MAX 128
-
struct meson_msr_id {
struct meson_msr *priv;
unsigned int id;
const char *name;
};
+struct msr_reg_offset {
+ unsigned int duty_val;
+ unsigned int freq_ctrl;
+ unsigned int duty_ctrl;
+ unsigned int freq_val;
+};
+
+struct meson_msr_data {
+ struct meson_msr_id *msr_table;
+ unsigned int msr_count;
+ const struct msr_reg_offset *reg;
+};
+
struct meson_msr {
struct regmap *regmap;
- struct meson_msr_id msr_table[CLK_MSR_MAX];
+ struct meson_msr_data data;
};
#define CLK_MSR_ID(__id, __name) \
[__id] = {.id = __id, .name = __name,}
-static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_m8[] = {
CLK_MSR_ID(0, "ring_osc_out_ee0"),
CLK_MSR_ID(1, "ring_osc_out_ee1"),
CLK_MSR_ID(2, "ring_osc_out_ee2"),
@@ -98,7 +104,7 @@ static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
CLK_MSR_ID(63, "mipi_csi_cfg"),
};
-static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_gx[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -168,7 +174,7 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
CLK_MSR_ID(82, "ge2d"),
};
-static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_axg[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -242,7 +248,7 @@ static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
CLK_MSR_ID(109, "audio_locker_in"),
};
-static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_g12a[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -358,7 +364,7 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
CLK_MSR_ID(122, "audio_pdm_dclk"),
};
-static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_sm1[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -488,10 +494,304 @@ static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
CLK_MSR_ID(127, "csi2_data"),
};
+static const struct meson_msr_id clk_msr_c3[] = {
+ CLK_MSR_ID(0, "sys_clk"),
+ CLK_MSR_ID(1, "axi_clk"),
+ CLK_MSR_ID(2, "rtc_clk"),
+ CLK_MSR_ID(3, "p20_usb2_ckout"),
+ CLK_MSR_ID(4, "eth_mpll_test"),
+ CLK_MSR_ID(5, "sys_pll"),
+ CLK_MSR_ID(6, "cpu_clk_div16"),
+ CLK_MSR_ID(7, "ts_pll"),
+ CLK_MSR_ID(8, "fclk_div2"),
+ CLK_MSR_ID(9, "fclk_div2p5"),
+ CLK_MSR_ID(10, "fclk_div3"),
+ CLK_MSR_ID(11, "fclk_div4"),
+ CLK_MSR_ID(12, "fclk_div5"),
+ CLK_MSR_ID(13, "fclk_div7"),
+ CLK_MSR_ID(15, "fclk_50m"),
+ CLK_MSR_ID(16, "sys_oscin32k_i"),
+ CLK_MSR_ID(17, "mclk_pll"),
+ CLK_MSR_ID(19, "hifi_pll"),
+ CLK_MSR_ID(20, "gp0_pll"),
+ CLK_MSR_ID(21, "gp1_pll"),
+ CLK_MSR_ID(22, "eth_mppll_50m_ckout"),
+ CLK_MSR_ID(23, "sys_pll_div16"),
+ CLK_MSR_ID(24, "ddr_dpll_pt_clk"),
+ CLK_MSR_ID(26, "nna_core"),
+ CLK_MSR_ID(27, "rtc_sec_pulse_out"),
+ CLK_MSR_ID(28, "rtc_osc_clk_out"),
+ CLK_MSR_ID(29, "debug_in_clk"),
+ CLK_MSR_ID(30, "mod_eth_phy_ref_clk"),
+ CLK_MSR_ID(31, "mod_eth_tx_clk"),
+ CLK_MSR_ID(32, "eth_125m"),
+ CLK_MSR_ID(33, "eth_rmii"),
+ CLK_MSR_ID(34, "co_clkin_to_mac"),
+ CLK_MSR_ID(36, "co_rx_clk"),
+ CLK_MSR_ID(37, "co_tx_clk"),
+ CLK_MSR_ID(38, "eth_phy_rxclk"),
+ CLK_MSR_ID(39, "eth_phy_plltxclk"),
+ CLK_MSR_ID(40, "ephy_test_clk"),
+ CLK_MSR_ID(66, "vapb"),
+ CLK_MSR_ID(67, "ge2d"),
+ CLK_MSR_ID(68, "dewarpa"),
+ CLK_MSR_ID(70, "mipi_dsi_meas"),
+ CLK_MSR_ID(71, "dsi_phy"),
+ CLK_MSR_ID(79, "rama"),
+ CLK_MSR_ID(94, "vc9000e_core"),
+ CLK_MSR_ID(95, "vc9000e_sys"),
+ CLK_MSR_ID(96, "vc9000e_aclk"),
+ CLK_MSR_ID(97, "hcodec"),
+ CLK_MSR_ID(106, "deskew_pll_clk_div32_out"),
+ CLK_MSR_ID(107, "mipi_csi_phy_clk_out[0]"),
+ CLK_MSR_ID(108, "mipi_csi_phy_clk_out[1]"),
+ CLK_MSR_ID(110, "spifc"),
+ CLK_MSR_ID(111, "saradc"),
+ CLK_MSR_ID(112, "ts"),
+ CLK_MSR_ID(113, "sd_emmc_c"),
+ CLK_MSR_ID(114, "sd_emmc_b"),
+ CLK_MSR_ID(115, "sd_emmc_a"),
+ CLK_MSR_ID(116, "gpio_msr_clk"),
+ CLK_MSR_ID(117, "spicc_b"),
+ CLK_MSR_ID(118, "spicc_a"),
+ CLK_MSR_ID(122, "mod_audio_pdm_dclk_o"),
+ CLK_MSR_ID(124, "o_earcrx_dmac_clk"),
+ CLK_MSR_ID(125, "o_earcrx_cmdc_clk"),
+ CLK_MSR_ID(126, "o_earctx_dmac_clk"),
+ CLK_MSR_ID(127, "o_earctx_cmdc_clk"),
+ CLK_MSR_ID(128, "o_tohdmitx_bclk"),
+ CLK_MSR_ID(129, "o_tohdmitx_mclk"),
+ CLK_MSR_ID(130, "o_tohdmitx_spdif_clk"),
+ CLK_MSR_ID(131, "o_toacodec_bclk"),
+ CLK_MSR_ID(132, "o_toacodec_mclk"),
+ CLK_MSR_ID(133, "o_spdifout_b_mst_clk"),
+ CLK_MSR_ID(134, "o_spdifout_mst_clk"),
+ CLK_MSR_ID(135, "o_spdifin_mst_clk"),
+ CLK_MSR_ID(136, "o_audio_mclk"),
+ CLK_MSR_ID(137, "o_vad_clk"),
+ CLK_MSR_ID(138, "o_tdmout_d_sclk"),
+ CLK_MSR_ID(139, "o_tdmout_c_sclk"),
+ CLK_MSR_ID(140, "o_tdmout_b_sclk"),
+ CLK_MSR_ID(141, "o_tdmout_a_sclk"),
+ CLK_MSR_ID(142, "o_tdminb_1b_sclk"),
+ CLK_MSR_ID(143, "o_tdmin_1b_sclk"),
+ CLK_MSR_ID(144, "o_tdmin_d_sclk"),
+ CLK_MSR_ID(145, "o_tdmin_c_sclk"),
+ CLK_MSR_ID(146, "o_tdmin_b_sclk"),
+ CLK_MSR_ID(147, "o_tdmin_a_sclk"),
+ CLK_MSR_ID(148, "o_resampleb_clk"),
+ CLK_MSR_ID(149, "o_resamplea_clk"),
+ CLK_MSR_ID(150, "o_pdmb_sysclk"),
+ CLK_MSR_ID(151, "o_pdmb_dclk"),
+ CLK_MSR_ID(152, "o_pdm_sysclk"),
+ CLK_MSR_ID(153, "o_pdm_dclk"),
+ CLK_MSR_ID(154, "c_alockerb_out_clk"),
+ CLK_MSR_ID(155, "c_alockerb_in_clk"),
+ CLK_MSR_ID(156, "c_alocker_out_clk"),
+ CLK_MSR_ID(157, "c_alocker_in_clk"),
+ CLK_MSR_ID(158, "audio_mst_clk[34]"),
+ CLK_MSR_ID(159, "audio_mst_clk[35]"),
+ CLK_MSR_ID(160, "pwm_n"),
+ CLK_MSR_ID(161, "pwm_m"),
+ CLK_MSR_ID(162, "pwm_l"),
+ CLK_MSR_ID(163, "pwm_k"),
+ CLK_MSR_ID(164, "pwm_j"),
+ CLK_MSR_ID(165, "pwm_i"),
+ CLK_MSR_ID(166, "pwm_h"),
+ CLK_MSR_ID(167, "pwm_g"),
+ CLK_MSR_ID(168, "pwm_f"),
+ CLK_MSR_ID(169, "pwm_e"),
+ CLK_MSR_ID(170, "pwm_d"),
+ CLK_MSR_ID(171, "pwm_c"),
+ CLK_MSR_ID(172, "pwm_b"),
+ CLK_MSR_ID(173, "pwm_a"),
+ CLK_MSR_ID(174, "AU_DAC1_CLK_TO_GPIO"),
+ CLK_MSR_ID(175, "AU_ADC_CLK_TO_GPIO"),
+ CLK_MSR_ID(176, "rng_ring_osc_clk[0]"),
+ CLK_MSR_ID(177, "rng_ring_osc_clk[1]"),
+ CLK_MSR_ID(178, "rng_ring_osc_clk[2]"),
+ CLK_MSR_ID(179, "rng_ring_osc_clk[3]"),
+ CLK_MSR_ID(180, "sys_cpu_ring_osc_clk[0]"),
+ CLK_MSR_ID(181, "sys_cpu_ring_osc_clk[1]"),
+ CLK_MSR_ID(182, "sys_cpu_ring_osc_clk[2]"),
+ CLK_MSR_ID(183, "sys_cpu_ring_osc_clk[3]"),
+ CLK_MSR_ID(184, "sys_cpu_ring_osc_clk[4]"),
+ CLK_MSR_ID(185, "sys_cpu_ring_osc_clk[5]"),
+ CLK_MSR_ID(186, "sys_cpu_ring_osc_clk[6]"),
+ CLK_MSR_ID(187, "sys_cpu_ring_osc_clk[7]"),
+ CLK_MSR_ID(188, "sys_cpu_ring_osc_clk[8]"),
+ CLK_MSR_ID(189, "sys_cpu_ring_osc_clk[9]"),
+ CLK_MSR_ID(190, "sys_cpu_ring_osc_clk[10]"),
+ CLK_MSR_ID(191, "sys_cpu_ring_osc_clk[11]"),
+ CLK_MSR_ID(192, "am_ring_osc_clk_out[12](dmc)"),
+ CLK_MSR_ID(193, "am_ring_osc_clk_out[13](rama)"),
+ CLK_MSR_ID(194, "am_ring_osc_clk_out[14](nna)"),
+ CLK_MSR_ID(195, "am_ring_osc_clk_out[15](nna)"),
+ CLK_MSR_ID(200, "rng_ring_osc_clk_1[0]"),
+ CLK_MSR_ID(201, "rng_ring_osc_clk_1[1]"),
+ CLK_MSR_ID(202, "rng_ring_osc_clk_1[2]"),
+ CLK_MSR_ID(203, "rng_ring_osc_clk_1[3]"),
+
+};
+
+static const struct meson_msr_id clk_msr_s4[] = {
+ CLK_MSR_ID(0, "sys_clk"),
+ CLK_MSR_ID(1, "axi_clk"),
+ CLK_MSR_ID(2, "rtc_clk"),
+ CLK_MSR_ID(5, "mali"),
+ CLK_MSR_ID(6, "cpu_clk_div16"),
+ CLK_MSR_ID(7, "ceca_clk"),
+ CLK_MSR_ID(8, "cecb_clk"),
+ CLK_MSR_ID(10, "fclk_div5"),
+ CLK_MSR_ID(11, "mpll0"),
+ CLK_MSR_ID(12, "mpll1"),
+ CLK_MSR_ID(13, "mpll2"),
+ CLK_MSR_ID(14, "mpll3"),
+ CLK_MSR_ID(15, "fclk_50m"),
+ CLK_MSR_ID(16, "pcie_clk_inp"),
+ CLK_MSR_ID(17, "pcie_clk_inn"),
+ CLK_MSR_ID(18, "mpll_clk_test_out"),
+ CLK_MSR_ID(19, "hifi_pll"),
+ CLK_MSR_ID(20, "gp0_pll"),
+ CLK_MSR_ID(21, "gp1_pll"),
+ CLK_MSR_ID(22, "eth_mppll_50m_ckout"),
+ CLK_MSR_ID(23, "sys_pll_div16"),
+ CLK_MSR_ID(24, "ddr_dpll_pt_clk"),
+ CLK_MSR_ID(30, "mod_eth_phy_ref_clk"),
+ CLK_MSR_ID(31, "mod_eth_tx_clk"),
+ CLK_MSR_ID(32, "eth_125m"),
+ CLK_MSR_ID(33, "eth_rmii"),
+ CLK_MSR_ID(34, "co_clkin_to_mac"),
+ CLK_MSR_ID(35, "mod_eth_rx_clk_rmii"),
+ CLK_MSR_ID(36, "co_rx_clk"),
+ CLK_MSR_ID(37, "co_tx_clk"),
+ CLK_MSR_ID(38, "eth_phy_rxclk"),
+ CLK_MSR_ID(39, "eth_phy_plltxclk"),
+ CLK_MSR_ID(40, "ephy_test_clk"),
+ CLK_MSR_ID(50, "vid_pll_div_clk_out"),
+ CLK_MSR_ID(51, "enci"),
+ CLK_MSR_ID(52, "encp"),
+ CLK_MSR_ID(53, "encl"),
+ CLK_MSR_ID(54, "vdac"),
+ CLK_MSR_ID(55, "cdac_clk_c"),
+ CLK_MSR_ID(56, "mod_tcon_clko"),
+ CLK_MSR_ID(57, "lcd_an_clk_ph2"),
+ CLK_MSR_ID(58, "lcd_an_clk_ph3"),
+ CLK_MSR_ID(59, "hdmitx_pixel"),
+ CLK_MSR_ID(60, "vdin_meas"),
+ CLK_MSR_ID(61, "vpu"),
+ CLK_MSR_ID(62, "vpu_clkb"),
+ CLK_MSR_ID(63, "vpu_clkb_tmp"),
+ CLK_MSR_ID(64, "vpu_clkc"),
+ CLK_MSR_ID(65, "vid_lock"),
+ CLK_MSR_ID(66, "vapb"),
+ CLK_MSR_ID(67, "ge2d"),
+ CLK_MSR_ID(68, "cts_hdcp22_esmclk"),
+ CLK_MSR_ID(69, "cts_hdcp22_skpclk"),
+ CLK_MSR_ID(76, "hdmitx_tmds"),
+ CLK_MSR_ID(77, "hdmitx_sys_clk"),
+ CLK_MSR_ID(78, "hdmitx_fe_clk"),
+ CLK_MSR_ID(79, "rama"),
+ CLK_MSR_ID(93, "vdec"),
+ CLK_MSR_ID(99, "hevcf"),
+ CLK_MSR_ID(100, "demod_core"),
+ CLK_MSR_ID(101, "adc_extclk_in"),
+ CLK_MSR_ID(102, "cts_demod_core_t2_clk"),
+ CLK_MSR_ID(103, "adc_dpll_intclk"),
+ CLK_MSR_ID(104, "adc_dpll_clk_b3"),
+ CLK_MSR_ID(105, "s2_adc_clk"),
+ CLK_MSR_ID(106, "deskew_pll_clk_div32_out"),
+ CLK_MSR_ID(110, "sc"),
+ CLK_MSR_ID(111, "sar_adc"),
+ CLK_MSR_ID(113, "sd_emmc_c"),
+ CLK_MSR_ID(114, "sd_emmc_b"),
+ CLK_MSR_ID(115, "sd_emmc_a"),
+ CLK_MSR_ID(116, "gpio_msr_clk"),
+ CLK_MSR_ID(118, "spicc0"),
+ CLK_MSR_ID(121, "ts"),
+ CLK_MSR_ID(130, "audio_vad_clk"),
+ CLK_MSR_ID(131, "acodec_dac_clk_x128"),
+ CLK_MSR_ID(132, "audio_locker_in_clk"),
+ CLK_MSR_ID(133, "audio_locker_out_clk"),
+ CLK_MSR_ID(134, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(135, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(136, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(137, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(138, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(139, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(140, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(141, "audio_resamplea_clk"),
+ CLK_MSR_ID(142, "audio_pdm_sysclk"),
+ CLK_MSR_ID(143, "audio_spdifout_b_mst_clk"),
+ CLK_MSR_ID(144, "audio_spdifout_mst_clk"),
+ CLK_MSR_ID(145, "audio_spdifin_mst_clk"),
+ CLK_MSR_ID(146, "audio_pdm_dclk"),
+ CLK_MSR_ID(147, "audio_resampleb_clk"),
+ CLK_MSR_ID(160, "pwm_j"),
+ CLK_MSR_ID(161, "pwm_i"),
+ CLK_MSR_ID(162, "pwm_h"),
+ CLK_MSR_ID(163, "pwm_g"),
+ CLK_MSR_ID(164, "pwm_f"),
+ CLK_MSR_ID(165, "pwm_e"),
+ CLK_MSR_ID(166, "pwm_d"),
+ CLK_MSR_ID(167, "pwm_c"),
+ CLK_MSR_ID(168, "pwm_b"),
+ CLK_MSR_ID(169, "pwm_a"),
+ CLK_MSR_ID(176, "rng_ring_0"),
+ CLK_MSR_ID(177, "rng_ring_1"),
+ CLK_MSR_ID(178, "rng_ring_2"),
+ CLK_MSR_ID(179, "rng_ring_3"),
+ CLK_MSR_ID(180, "dmc_osc_ring(LVT16)"),
+ CLK_MSR_ID(181, "gpu_osc_ring0(LVT16)"),
+ CLK_MSR_ID(182, "gpu_osc_ring1(ULVT16)"),
+ CLK_MSR_ID(183, "gpu_osc_ring2(SLVT16)"),
+ CLK_MSR_ID(184, "vpu_osc_ring0(SVT24)"),
+ CLK_MSR_ID(185, "vpu_osc_ring1(LVT20)"),
+ CLK_MSR_ID(186, "vpu_osc_ring2(LVT16)"),
+ CLK_MSR_ID(187, "dos_osc_ring0(SVT24)"),
+ CLK_MSR_ID(188, "dos_osc_ring1(SVT16)"),
+ CLK_MSR_ID(189, "dos_osc_ring2(LVT16)"),
+ CLK_MSR_ID(190, "dos_osc_ring3(ULVT20)"),
+ CLK_MSR_ID(192, "axi_sram_osc_ring(SVT16)"),
+ CLK_MSR_ID(193, "demod_osc_ring0"),
+ CLK_MSR_ID(194, "demod_osc_ring1"),
+ CLK_MSR_ID(195, "sar_osc_ring"),
+ CLK_MSR_ID(196, "sys_cpu_osc_ring0"),
+ CLK_MSR_ID(197, "sys_cpu_osc_ring1"),
+ CLK_MSR_ID(198, "sys_cpu_osc_ring2"),
+ CLK_MSR_ID(199, "sys_cpu_osc_ring3"),
+ CLK_MSR_ID(200, "sys_cpu_osc_ring4"),
+ CLK_MSR_ID(201, "sys_cpu_osc_ring5"),
+ CLK_MSR_ID(202, "sys_cpu_osc_ring6"),
+ CLK_MSR_ID(203, "sys_cpu_osc_ring7"),
+ CLK_MSR_ID(204, "sys_cpu_osc_ring8"),
+ CLK_MSR_ID(205, "sys_cpu_osc_ring9"),
+ CLK_MSR_ID(206, "sys_cpu_osc_ring10"),
+ CLK_MSR_ID(207, "sys_cpu_osc_ring11"),
+ CLK_MSR_ID(208, "sys_cpu_osc_ring12"),
+ CLK_MSR_ID(209, "sys_cpu_osc_ring13"),
+ CLK_MSR_ID(210, "sys_cpu_osc_ring14"),
+ CLK_MSR_ID(211, "sys_cpu_osc_ring15"),
+ CLK_MSR_ID(212, "sys_cpu_osc_ring16"),
+ CLK_MSR_ID(213, "sys_cpu_osc_ring17"),
+ CLK_MSR_ID(214, "sys_cpu_osc_ring18"),
+ CLK_MSR_ID(215, "sys_cpu_osc_ring19"),
+ CLK_MSR_ID(216, "sys_cpu_osc_ring20"),
+ CLK_MSR_ID(217, "sys_cpu_osc_ring21"),
+ CLK_MSR_ID(218, "sys_cpu_osc_ring22"),
+ CLK_MSR_ID(219, "sys_cpu_osc_ring23"),
+ CLK_MSR_ID(220, "sys_cpu_osc_ring24"),
+ CLK_MSR_ID(221, "sys_cpu_osc_ring25"),
+ CLK_MSR_ID(222, "sys_cpu_osc_ring26"),
+ CLK_MSR_ID(223, "sys_cpu_osc_ring27"),
+
+};
+
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
- unsigned int duration)
+ unsigned int duration)
{
struct meson_msr *priv = clk_msr_id->priv;
+ const struct msr_reg_offset *reg = priv->data.reg;
unsigned int val;
int ret;
@@ -499,22 +799,22 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
if (ret)
return ret;
- regmap_write(priv->regmap, MSR_CLK_REG0, 0);
+ regmap_write(priv->regmap, reg->freq_ctrl, 0);
/* Set measurement duration */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_DURATION,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_DURATION,
FIELD_PREP(MSR_DURATION, duration - 1));
/* Set ID */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_CLK_SRC,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_CLK_SRC,
FIELD_PREP(MSR_CLK_SRC, clk_msr_id->id));
/* Enable & Start */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl,
MSR_RUN | MSR_ENABLE,
MSR_RUN | MSR_ENABLE);
- ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
+ ret = regmap_read_poll_timeout(priv->regmap, reg->freq_ctrl,
val, !(val & MSR_BUSY), 10, 10000);
if (ret) {
mutex_unlock(&measure_lock);
@@ -522,10 +822,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
}
/* Disable */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_ENABLE, 0);
/* Get the value in multiple of gate time counts */
- regmap_read(priv->regmap, MSR_CLK_REG2, &val);
+ regmap_read(priv->regmap, reg->freq_val, &val);
mutex_unlock(&measure_lock);
@@ -573,13 +873,14 @@ DEFINE_SHOW_ATTRIBUTE(clk_msr);
static int clk_msr_summary_show(struct seq_file *s, void *data)
{
struct meson_msr_id *msr_table = s->private;
+ unsigned int msr_count = msr_table->priv->data.msr_count;
unsigned int precision = 0;
int val, i;
seq_puts(s, " clock rate precision\n");
seq_puts(s, "---------------------------------------------\n");
- for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
+ for (i = 0 ; i < msr_count ; ++i) {
if (!msr_table[i].name)
continue;
@@ -595,18 +896,18 @@ static int clk_msr_summary_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_msr_summary);
-static const struct regmap_config meson_clk_msr_regmap_config = {
+static struct regmap_config meson_clk_msr_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = MSR_CLK_REG2,
};
static int meson_msr_probe(struct platform_device *pdev)
{
- const struct meson_msr_id *match_data;
+ const struct meson_msr_data *match_data;
struct meson_msr *priv;
struct dentry *root, *clks;
+ struct resource *res;
void __iomem *base;
int i;
@@ -621,60 +922,142 @@ static int meson_msr_probe(struct platform_device *pdev)
return -ENODEV;
}
- memcpy(priv->msr_table, match_data, sizeof(priv->msr_table));
+ priv->data.msr_table = devm_kcalloc(&pdev->dev,
+ match_data->msr_count,
+ sizeof(struct meson_msr_id),
+ GFP_KERNEL);
+ if (!priv->data.msr_table)
+ return -ENOMEM;
- base = devm_platform_ioremap_resource(pdev, 0);
+ memcpy(priv->data.msr_table, match_data->msr_table,
+ match_data->msr_count * sizeof(struct meson_msr_id));
+ priv->data.msr_count = match_data->msr_count;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
+ meson_clk_msr_regmap_config.max_register = resource_size(res) - 4;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_clk_msr_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+ priv->data.reg = devm_kzalloc(&pdev->dev, sizeof(struct msr_reg_offset),
+ GFP_KERNEL);
+ if (!priv->data.reg)
+ return -ENOMEM;
+
+ memcpy((void *)priv->data.reg, match_data->reg,
+ sizeof(struct msr_reg_offset));
+
root = debugfs_create_dir("meson-clk-msr", NULL);
clks = debugfs_create_dir("clks", root);
debugfs_create_file("measure_summary", 0444, root,
- priv->msr_table, &clk_msr_summary_fops);
+ priv->data.msr_table, &clk_msr_summary_fops);
- for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
- if (!priv->msr_table[i].name)
+ for (i = 0 ; i < priv->data.msr_count ; ++i) {
+ if (!priv->data.msr_table[i].name)
continue;
- priv->msr_table[i].priv = priv;
+ priv->data.msr_table[i].priv = priv;
- debugfs_create_file(priv->msr_table[i].name, 0444, clks,
- &priv->msr_table[i], &clk_msr_fops);
+ debugfs_create_file(priv->data.msr_table[i].name, 0444, clks,
+ &priv->data.msr_table[i], &clk_msr_fops);
}
return 0;
}
+static const struct msr_reg_offset msr_reg_offset = {
+ .duty_val = 0x0,
+ .freq_ctrl = 0x4,
+ .duty_ctrl = 0x8,
+ .freq_val = 0xc,
+};
+
+static const struct meson_msr_data clk_msr_gx_data = {
+ .msr_table = (void *)clk_msr_gx,
+ .msr_count = ARRAY_SIZE(clk_msr_gx),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_m8_data = {
+ .msr_table = (void *)clk_msr_m8,
+ .msr_count = ARRAY_SIZE(clk_msr_m8),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_axg_data = {
+ .msr_table = (void *)clk_msr_axg,
+ .msr_count = ARRAY_SIZE(clk_msr_axg),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_g12a_data = {
+ .msr_table = (void *)clk_msr_g12a,
+ .msr_count = ARRAY_SIZE(clk_msr_g12a),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_sm1_data = {
+ .msr_table = (void *)clk_msr_sm1,
+ .msr_count = ARRAY_SIZE(clk_msr_sm1),
+ .reg = &msr_reg_offset,
+};
+
+static const struct msr_reg_offset msr_reg_offset_v2 = {
+ .freq_ctrl = 0x0,
+ .duty_ctrl = 0x4,
+ .freq_val = 0x8,
+ .duty_val = 0x18,
+};
+
+static const struct meson_msr_data clk_msr_c3_data = {
+ .msr_table = (void *)clk_msr_c3,
+ .msr_count = ARRAY_SIZE(clk_msr_c3),
+ .reg = &msr_reg_offset_v2,
+};
+
+static const struct meson_msr_data clk_msr_s4_data = {
+ .msr_table = (void *)clk_msr_s4,
+ .msr_count = ARRAY_SIZE(clk_msr_s4),
+ .reg = &msr_reg_offset_v2,
+};
+
static const struct of_device_id meson_msr_match_table[] = {
{
.compatible = "amlogic,meson-gx-clk-measure",
- .data = (void *)clk_msr_gx,
+ .data = &clk_msr_gx_data,
},
{
.compatible = "amlogic,meson8-clk-measure",
- .data = (void *)clk_msr_m8,
+ .data = &clk_msr_m8_data,
},
{
.compatible = "amlogic,meson8b-clk-measure",
- .data = (void *)clk_msr_m8,
+ .data = &clk_msr_m8_data,
},
{
.compatible = "amlogic,meson-axg-clk-measure",
- .data = (void *)clk_msr_axg,
+ .data = &clk_msr_axg_data,
},
{
.compatible = "amlogic,meson-g12a-clk-measure",
- .data = (void *)clk_msr_g12a,
+ .data = &clk_msr_g12a_data,
},
{
.compatible = "amlogic,meson-sm1-clk-measure",
- .data = (void *)clk_msr_sm1,
+ .data = &clk_msr_sm1_data,
+ },
+ {
+ .compatible = "amlogic,c3-clk-measure",
+ .data = &clk_msr_c3_data,
+ },
+ {
+ .compatible = "amlogic,s4-clk-measure",
+ .data = &clk_msr_s4_data,
},
{ /* sentinel */ }
};
@@ -688,4 +1071,5 @@ static struct platform_driver meson_msr_driver = {
},
};
module_platform_driver(meson_msr_driver);
+MODULE_DESCRIPTION("Amlogic Meson SoC Clock Measure driver");
MODULE_LICENSE("GPL v2");
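
The meson-clk-measure changes above drop the fixed CLK_MSR_MAX-sized tables and instead attach a per-SoC meson_msr_data carrying the clock table, its length, and a register-offset layout (the original layout for GX/M8/AXG/G12A/SM1, the "v2" layout for C3 and S4). As a rough sketch of that pattern only -- the "newsoc" names and compatible string below are invented for illustration and are not part of the patch -- adding support for another SoC would look roughly like this:

	/* Hypothetical example, reusing the structures introduced above. */
	static const struct meson_msr_id clk_msr_newsoc[] = {
		CLK_MSR_ID(0, "sys_clk"),
		CLK_MSR_ID(1, "axi_clk"),
	};

	static const struct meson_msr_data clk_msr_newsoc_data = {
		.msr_table = (void *)clk_msr_newsoc,
		.msr_count = ARRAY_SIZE(clk_msr_newsoc),
		.reg       = &msr_reg_offset_v2,	/* v2 layout: freq_ctrl at 0x0 */
	};

	/* plus one extra entry in meson_msr_match_table[]:
	 * { .compatible = "amlogic,newsoc-clk-measure", .data = &clk_msr_newsoc_data },
	 */

The probe path then sizes its regmap from the ioremapped resource and copies the matched table, so no per-SoC maximum needs to be hard-coded.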
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 6abb730344ab..2a54ca43cd13 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -41,6 +41,14 @@ static const struct meson_gx_soc_id {
{ "G12B", 0x29 },
{ "SM1", 0x2b },
{ "A1", 0x2c },
+ { "T7", 0x36 },
+ { "S4", 0x37 },
+ { "A5", 0x3c },
+ { "C3", 0x3d },
+ { "A4", 0x40 },
+ { "S7", 0x46 },
+ { "S7D", 0x47 },
+ { "S6", 0x48 },
};
static const struct meson_gx_package_id {
@@ -63,7 +71,9 @@ static const struct meson_gx_package_id {
{ "962X", 0x24, 0x10, 0xf0 },
{ "962E", 0x24, 0x20, 0xf0 },
{ "A113X", 0x25, 0x37, 0xff },
+ { "A113X", 0x25, 0x43, 0xff },
{ "A113D", 0x25, 0x22, 0xff },
+ { "S905L", 0x26, 0, 0x0 },
{ "S905D2", 0x28, 0x10, 0xf0 },
{ "S905Y2", 0x28, 0x30, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
@@ -74,6 +84,14 @@ static const struct meson_gx_package_id {
{ "S905X3", 0x2b, 0x10, 0x3f },
{ "S905D3", 0x2b, 0x30, 0x3f },
{ "A113L", 0x2c, 0x0, 0xf8 },
+ { "S805X2", 0x37, 0x2, 0xf },
+ { "C308L", 0x3d, 0x1, 0xf },
+ { "A311D2", 0x36, 0x1, 0xf },
+ { "A113X2", 0x3c, 0x1, 0xf },
+ { "A113L2", 0x40, 0x1, 0xf },
+ { "S805X3", 0x46, 0x3, 0xf },
+ { "S905X5M", 0x47, 0x1, 0xf },
+ { "S905X5", 0x48, 0x1, 0xf },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
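
The new SoC and package IDs added above are resolved by comparing the chip-id's major field and by masking its package field against each entry's mask. A hedged sketch of such a lookup follows; the array and helper names (other than socinfo_to_major(), which is visible above) are assumptions for illustration, not identifiers introduced by this patch:

	/* Illustrative only: match a package entry such as
	 * { "S805X2", 0x37, 0x2, 0xf }, i.e. major 0x37 and (pack & 0xf) == 0x2.
	 */
	static const char *example_lookup_package(u32 socinfo)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(example_packages); i++) {
			if (example_packages[i].major_id == socinfo_to_major(socinfo) &&
			    example_packages[i].pack_id ==
				    (socinfo_to_pack(socinfo) & example_packages[i].pack_mask))
				return example_packages[i].name;
		}

		return "Unknown";
	}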
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index a1596fefacff..ad6736889231 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -4,24 +4,22 @@ if ARCH_APPLE || COMPILE_TEST
menu "Apple SoC drivers"
-config APPLE_PMGR_PWRSTATE
- bool "Apple SoC PMGR power state control"
+config APPLE_MAILBOX
+ tristate "Apple SoC mailboxes"
depends on PM
- select REGMAP
- select MFD_SYSCON
- select PM_GENERIC_DOMAINS
- select RESET_CONTROLLER
- default ARCH_APPLE
+ depends on ARCH_APPLE || (64BIT && COMPILE_TEST)
help
- The PMGR block in Apple SoCs provides high-level power state
- controls for SoC devices. This driver manages them through the
- generic power domain framework, and also provides reset support.
+ Apple SoCs have various co-processors required for certain
+ peripherals to work (NVMe, display controller, etc.). This
+ driver adds support for the mailbox controller used to
+ communicate with those.
+
+ Say Y here if you have an Apple SoC.
config APPLE_RTKIT
tristate "Apple RTKit co-processor IPC protocol"
- depends on MAILBOX
+ depends on APPLE_MAILBOX
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Apple SoCs such as the M1 come with various co-processors running
their proprietary RTKit operating system. This option enables support
@@ -33,7 +31,6 @@ config APPLE_RTKIT
config APPLE_SART
tristate "Apple SART DMA address filter"
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Apple SART is a simple DMA address filter used on Apple SoCs such
as the M1. It is usually required for the NVMe coprocessor which does
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
index b241e6a65e5b..4d9ab8f3037b 100644
--- a/drivers/soc/apple/Makefile
+++ b/drivers/soc/apple/Makefile
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
+apple-mailbox-y = mailbox.o
+
obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
apple-rtkit-y = rtkit.o rtkit-crashlog.o
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
new file mode 100644
index 000000000000..5c48455185c9
--- /dev/null
+++ b/drivers/soc/apple/mailbox.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple mailbox driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver adds support for two mailbox variants (called ASC and M3 by
+ * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
+ * exchange 64+32 bit messages between the main CPU and a co-processor.
+ * Various coprocessors implement different IPC protocols based on these simple
+ * messages and shared memory buffers.
+ *
+ * Both the main CPU and the co-processor see the same set of registers but
+ * the first FIFO (A2I) is always used to transfer messages from the application
+ * processor (us) to the I/O processor and the second one (I2A) for the
+ * other direction.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mailbox.h"
+
+#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
+#define APPLE_ASC_MBOX_A2I_SEND0 0x800
+#define APPLE_ASC_MBOX_A2I_SEND1 0x808
+#define APPLE_ASC_MBOX_A2I_RECV0 0x810
+#define APPLE_ASC_MBOX_A2I_RECV1 0x818
+
+#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
+#define APPLE_ASC_MBOX_I2A_SEND0 0x820
+#define APPLE_ASC_MBOX_I2A_SEND1 0x828
+#define APPLE_ASC_MBOX_I2A_RECV0 0x830
+#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+
+#define APPLE_T8015_MBOX_A2I_CONTROL 0x108
+#define APPLE_T8015_MBOX_I2A_CONTROL 0x10c
+
+#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_M3_MBOX_A2I_CONTROL 0x50
+#define APPLE_M3_MBOX_A2I_SEND0 0x60
+#define APPLE_M3_MBOX_A2I_SEND1 0x68
+#define APPLE_M3_MBOX_A2I_RECV0 0x70
+#define APPLE_M3_MBOX_A2I_RECV1 0x78
+
+#define APPLE_M3_MBOX_I2A_CONTROL 0x80
+#define APPLE_M3_MBOX_I2A_SEND0 0x90
+#define APPLE_M3_MBOX_I2A_SEND1 0x98
+#define APPLE_M3_MBOX_I2A_RECV0 0xa0
+#define APPLE_M3_MBOX_I2A_RECV1 0xa8
+
+#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
+#define APPLE_M3_MBOX_IRQ_ACK 0x4c
+#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
+#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
+#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
+#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
+
+#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
+#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
+#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
+#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
+#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
+
+#define APPLE_MBOX_TX_TIMEOUT 500
+
+struct apple_mbox_hw {
+ unsigned int control_full;
+ unsigned int control_empty;
+
+ unsigned int a2i_control;
+ unsigned int a2i_send0;
+ unsigned int a2i_send1;
+
+ unsigned int i2a_control;
+ unsigned int i2a_recv0;
+ unsigned int i2a_recv1;
+
+ bool has_irq_controls;
+ unsigned int irq_enable;
+ unsigned int irq_ack;
+ unsigned int irq_bit_recv_not_empty;
+ unsigned int irq_bit_send_empty;
+};
+
+int apple_mbox_send(struct apple_mbox *mbox, const struct apple_mbox_msg msg,
+ bool atomic)
+{
+ unsigned long flags;
+ int ret;
+ u32 mbox_ctrl;
+ long t;
+
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control);
+
+ while (mbox_ctrl & mbox->hw->control_full) {
+ if (atomic) {
+ ret = readl_poll_timeout_atomic(
+ mbox->regs + mbox->hw->a2i_control, mbox_ctrl,
+ !(mbox_ctrl & mbox->hw->control_full), 100,
+ APPLE_MBOX_TX_TIMEOUT * 1000);
+
+ if (ret) {
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+ return ret;
+ }
+
+ break;
+ }
+ /*
+ * The interrupt is level triggered and will keep firing as long as the
+ * FIFO is empty. It will also keep firing if the FIFO was empty
+ * at any point in the past until it has been acknowledged at the
+ * mailbox level. By acknowledging it here we can ensure that we will
+ * only get the interrupt once the FIFO has been cleared again.
+ * If the FIFO is already empty before the ack it will fire again
+ * immediately after the ack.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_send_empty,
+ mbox->regs + mbox->hw->irq_ack);
+ }
+ enable_irq(mbox->irq_send_empty);
+ reinit_completion(&mbox->tx_empty);
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+
+ t = wait_for_completion_interruptible_timeout(
+ &mbox->tx_empty,
+ msecs_to_jiffies(APPLE_MBOX_TX_TIMEOUT));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIMEDOUT;
+
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control);
+ }
+
+ writeq_relaxed(msg.msg0, mbox->regs + mbox->hw->a2i_send0);
+ writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg.msg1),
+ mbox->regs + mbox->hw->a2i_send1);
+
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(apple_mbox_send);
+
+static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
+{
+ struct apple_mbox *mbox = data;
+
+ /*
+ * We don't need to acknowledge the interrupt at the mailbox level
+ * here even if supported by the hardware. It will keep firing but that
+ * doesn't matter since it's disabled at the main interrupt controller.
+ * apple_mbox_send will acknowledge it before enabling
+ * it at the main controller again.
+ */
+ spin_lock(&mbox->tx_lock);
+ disable_irq_nosync(mbox->irq_send_empty);
+ complete(&mbox->tx_empty);
+ spin_unlock(&mbox->tx_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int apple_mbox_poll_locked(struct apple_mbox *mbox)
+{
+ struct apple_mbox_msg msg;
+ int ret = 0;
+
+ u32 mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control);
+
+ while (!(mbox_ctrl & mbox->hw->control_empty)) {
+ msg.msg0 = readq_relaxed(mbox->regs + mbox->hw->i2a_recv0);
+ msg.msg1 = FIELD_GET(
+ APPLE_MBOX_MSG1_MSG,
+ readq_relaxed(mbox->regs + mbox->hw->i2a_recv1));
+
+ mbox->rx(mbox, msg, mbox->cookie);
+ ret++;
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control);
+ }
+
+ /*
+ * The interrupt will keep firing even if there are no more messages
+ * unless we also acknowledge it at the mailbox level here.
+ * There's no race if a message comes in between the check in the while
+ * loop above and the ack below: If a new messages arrives inbetween
+ * those two the interrupt will just fire again immediately after the
+ * ack since it's level triggered.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_recv_not_empty,
+ mbox->regs + mbox->hw->irq_ack);
+ }
+
+ return ret;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *mbox = data;
+
+ spin_lock(&mbox->rx_lock);
+ apple_mbox_poll_locked(mbox);
+ spin_unlock(&mbox->rx_lock);
+
+ return IRQ_HANDLED;
+}
+
+int apple_mbox_poll(struct apple_mbox *mbox)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mbox->rx_lock, flags);
+ ret = apple_mbox_poll_locked(mbox);
+ spin_unlock_irqrestore(&mbox->rx_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_mbox_poll);
+
+int apple_mbox_start(struct apple_mbox *mbox)
+{
+ int ret;
+
+ if (mbox->active)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(mbox->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Only some variants of this mailbox HW provide interrupt control
+ * at the mailbox level. We therefore need to handle enabling/disabling
+ * interrupts at the main interrupt controller anyway for hardware that
+ * doesn't. Just always keep the interrupts we care about enabled at
+ * the mailbox level so that both hardware revisions behave almost
+ * the same.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_recv_not_empty |
+ mbox->hw->irq_bit_send_empty,
+ mbox->regs + mbox->hw->irq_enable);
+ }
+
+ enable_irq(mbox->irq_recv_not_empty);
+ mbox->active = true;
+ return 0;
+}
+EXPORT_SYMBOL(apple_mbox_start);
+
+void apple_mbox_stop(struct apple_mbox *mbox)
+{
+ if (!mbox->active)
+ return;
+
+ mbox->active = false;
+ disable_irq(mbox->irq_recv_not_empty);
+ pm_runtime_mark_last_busy(mbox->dev);
+ pm_runtime_put_autosuspend(mbox->dev);
+}
+EXPORT_SYMBOL(apple_mbox_stop);
+
+struct apple_mbox *apple_mbox_get(struct device *dev, int index)
+{
+ struct of_phandle_args args;
+ struct platform_device *pdev;
+ struct apple_mbox *mbox;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &args);
+ if (ret || !args.np)
+ return ERR_PTR(ret);
+
+ pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ mbox = platform_get_drvdata(pdev);
+ if (!mbox) {
+ mbox = ERR_PTR(-EPROBE_DEFER);
+ goto out_put_pdev;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ mbox = ERR_PTR(-ENODEV);
+ goto out_put_pdev;
+ }
+
+out_put_pdev:
+ put_device(&pdev->dev);
+
+ return mbox;
+}
+EXPORT_SYMBOL(apple_mbox_get);
+
+struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name)
+{
+ int index;
+
+ index = of_property_match_string(dev->of_node, "mbox-names", name);
+ if (index < 0)
+ return ERR_PTR(index);
+
+ return apple_mbox_get(dev, index);
+}
+EXPORT_SYMBOL(apple_mbox_get_byname);
+
+static int apple_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ char *irqname;
+ struct apple_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = &pdev->dev;
+ mbox->hw = of_device_get_match_data(dev);
+ if (!mbox->hw)
+ return -EINVAL;
+
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs))
+ return PTR_ERR(mbox->regs);
+
+ mbox->irq_recv_not_empty =
+ platform_get_irq_byname(pdev, "recv-not-empty");
+ if (mbox->irq_recv_not_empty < 0)
+ return -ENODEV;
+
+ mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
+ if (mbox->irq_send_empty < 0)
+ return -ENODEV;
+
+ spin_lock_init(&mbox->rx_lock);
+ spin_lock_init(&mbox->tx_lock);
+ init_completion(&mbox->tx_empty);
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_recv_not_empty,
+ apple_mbox_recv_irq,
+ IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox);
+ if (ret)
+ return ret;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_send_empty,
+ apple_mbox_send_empty_irq,
+ IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mbox);
+ return 0;
+}
+
+static const struct apple_mbox_hw apple_mbox_t8015_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_T8015_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_T8015_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_asc_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_m3_hw = {
+ .control_full = APPLE_M3_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
+
+ .has_irq_controls = true,
+ .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
+ .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
+ .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
+ .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
+};
+
+static const struct of_device_id apple_mbox_of_match[] = {
+ { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,t8015-asc-mailbox", .data = &apple_mbox_t8015_hw },
+ { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
+
+static struct platform_driver apple_mbox_driver = {
+ .driver = {
+ .name = "apple-mailbox",
+ .of_match_table = apple_mbox_of_match,
+ },
+ .probe = apple_mbox_probe,
+};
+module_platform_driver(apple_mbox_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Mailbox driver");
diff --git a/drivers/soc/apple/mailbox.h b/drivers/soc/apple/mailbox.h
new file mode 100644
index 000000000000..f73a8913da95
--- /dev/null
+++ b/drivers/soc/apple/mailbox.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple mailbox message format
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_MAILBOX_H_
+#define _APPLE_MAILBOX_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/* encodes a single 96bit message sent over the single channel */
+struct apple_mbox_msg {
+ u64 msg0;
+ u32 msg1;
+};
+
+struct apple_mbox {
+ struct device *dev;
+ void __iomem *regs;
+ const struct apple_mbox_hw *hw;
+ bool active;
+
+ int irq_recv_not_empty;
+ int irq_send_empty;
+
+ spinlock_t rx_lock;
+ spinlock_t tx_lock;
+
+ struct completion tx_empty;
+
+ /** Receive callback for incoming messages */
+ void (*rx)(struct apple_mbox *mbox, struct apple_mbox_msg msg, void *cookie);
+ void *cookie;
+};
+
+struct apple_mbox *apple_mbox_get(struct device *dev, int index);
+struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name);
+
+int apple_mbox_start(struct apple_mbox *mbox);
+void apple_mbox_stop(struct apple_mbox *mbox);
+int apple_mbox_poll(struct apple_mbox *mbox);
+int apple_mbox_send(struct apple_mbox *mbox, struct apple_mbox_msg msg,
+ bool atomic);
+
+#endif
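
The header above replaces the generic mailbox-framework client with a small in-kernel API: a consumer obtains the mailbox, installs its rx callback and cookie (plain struct fields), starts it, and sends 64+32-bit messages directly. The sketch below is illustrative only -- the function names, message value, and device-tree "mbox" name are invented; the real consumer in this series is the reworked rtkit.c further down.

	static void example_rx(struct apple_mbox *mbox, struct apple_mbox_msg msg,
			       void *cookie)
	{
		struct device *dev = cookie;

		/* msg.msg0 is the 64-bit payload, msg.msg1 the 32-bit companion word */
		dev_info(dev, "mailbox rx: 0x%llx / 0x%x\n", msg.msg0, msg.msg1);
	}

	static int example_start(struct device *dev)
	{
		struct apple_mbox_msg msg = { .msg0 = 0, .msg1 = 0 };
		struct apple_mbox *mbox;
		int ret;

		mbox = apple_mbox_get_byname(dev, "mbox");
		if (IS_ERR(mbox))
			return PTR_ERR(mbox);

		/* rx callback and cookie are set before starting the mailbox */
		mbox->rx = example_rx;
		mbox->cookie = dev;

		ret = apple_mbox_start(mbox);
		if (ret)
			return ret;

		/* non-atomic send: may sleep until the A2I FIFO has room */
		return apple_mbox_send(mbox, msg, false);
	}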
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 24bd619ec5e4..b8d5244678f0 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -7,18 +7,17 @@
#ifndef _APPLE_RTKIT_INTERAL_H
#define _APPLE_RTKIT_INTERAL_H
-#include <linux/apple-mailbox.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/workqueue.h>
+#include "mailbox.h"
#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
@@ -28,10 +27,7 @@ struct apple_rtkit {
const struct apple_rtkit_ops *ops;
struct device *dev;
- const char *mbox_name;
- int mbox_idx;
- struct mbox_client mbox_cl;
- struct mbox_chan *mbox_chan;
+ struct apple_mbox *mbox;
struct completion epmap_completion;
struct completion iop_pwr_ack_completion;
@@ -48,6 +44,7 @@ struct apple_rtkit {
struct apple_rtkit_shmem ioreport_buffer;
struct apple_rtkit_shmem crashlog_buffer;
+ struct apple_rtkit_shmem oslog_buffer;
struct apple_rtkit_shmem syslog_buffer;
char *syslog_msg_buffer;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index d9f19dc99da5..b8d4da147d23 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -12,6 +12,7 @@ enum {
APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+ APPLE_RTKIT_PWR_STATE_INIT = 0x220, /* init after starting the coproc */
};
enum {
@@ -66,17 +67,13 @@ enum {
#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
-#define APPLE_RTKIT_OSLOG_INIT 1
-#define APPLE_RTKIT_OSLOG_ACK 3
+#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
+#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
+#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
-struct apple_rtkit_msg {
- struct completion *completion;
- struct apple_mbox_msg mbox_msg;
-};
-
struct apple_rtkit_rx_work {
struct apple_rtkit *rtk;
u8 ep;
@@ -102,12 +99,19 @@ bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
-static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+static int apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
u64 msg)
{
+ int ret;
+
msg &= ~APPLE_RTKIT_MGMT_TYPE;
msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+ ret = apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+
+ if (ret)
+ dev_err(rtk->dev, "RTKit: Failed to send management message: %d\n", ret);
+
+ return ret;
}
static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
@@ -256,21 +260,26 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *buffer,
u8 ep, u64 msg)
{
- size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
u64 reply;
int err;
+ /* The different size vs. IOVA shifts look odd but are indeed correct this way */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
+ buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
+ } else {
+ buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ }
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->is_mapped = false;
- buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
- buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
- if (buffer->iova &&
- (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) {
+ if (buffer->iova && !rtk->ops->shmem_setup) {
err = -EINVAL;
goto error;
}
@@ -289,17 +298,30 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
}
if (!buffer->is_mapped) {
- reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
- APPLE_RTKIT_BUFFER_REQUEST);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
- buffer->iova);
+ /* oslog uses different fields and needs a shifted IOVA instead of size */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
+ APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
+ buffer->iova >> 12);
+ } else {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
+ buffer->size >> 12);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ }
apple_rtkit_send_message(rtk, ep, reply, NULL, false);
}
return 0;
error:
+ dev_err(rtk->dev, "RTKit: failed buffer request for 0x%zx bytes (%d)\n",
+ buffer->size, err);
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->iova = 0;
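For illustration, here is how the oslog buffer-request reply above works out with made-up numbers; this is only a sketch reusing the masks defined in this file, not part of the patch. A 0x18000-byte buffer at IOVA 0xf_0000_0000 keeps its size in bytes while the IOVA travels as a 4k page number:

static u64 oslog_buffer_reply_example(void)
{
	size_t size = 0x18000;		/* stays in bytes, packed into bits 55:36 */
	u64 iova = 0xf00000000ULL;	/* sent as a page number in bits 35:0 */

	return FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_BUFFER_REQUEST) |
	       FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, size) |
	       FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA, iova >> 12);
	/* i.e. type = 1, size field = 0x18000, iova field = 0xf00000 */
}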
@@ -365,7 +387,6 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
rtk->crashlog_buffer.size);
apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
- kfree(bfr);
} else {
dev_err(rtk->dev,
"RTKit: Couldn't allocate crashlog shadow buffer\n");
@@ -373,7 +394,9 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
rtk->crashed = true;
if (rtk->ops->crashed)
- rtk->ops->crashed(rtk->cookie);
+ rtk->ops->crashed(rtk->cookie, bfr, rtk->crashlog_buffer.size);
+
+ kfree(bfr);
}
static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
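Because the crashlog shadow copy is now freed as soon as the callback returns, a client that wants to keep it must duplicate it itself. A minimal sketch of such a callback, assuming the ops->crashed prototype matches the call site above (cookie, buffer, size); the client structure and field names are made up:

static void my_coproc_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
	struct my_coproc *cp = cookie;		/* hypothetical client state */

	/* rtkit frees 'crashlog' right after this returns; keep our own copy */
	cp->last_crashlog = kmemdup(crashlog, crashlog_size, GFP_KERNEL);
	cp->last_crashlog_size = cp->last_crashlog ? crashlog_size : 0;

	dev_err(cp->dev, "coprocessor crashed, %zu byte crashlog captured\n",
		crashlog_size);
}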
@@ -453,7 +476,7 @@ static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
log_context[sizeof(log_context) - 1] = 0;
- msglen = rtk->syslog_msg_size - 1;
+ msglen = strnlen(rtk->syslog_msg_buffer, rtk->syslog_msg_size - 1);
while (msglen > 0 &&
should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
msglen--;
@@ -487,25 +510,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
}
}
-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
-{
- u64 ack;
-
- dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
- ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
-}
-
static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) {
- case APPLE_RTKIT_OSLOG_INIT:
- apple_rtkit_oslog_rx_init(rtk, msg);
+ case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
+ APPLE_RTKIT_EP_OSLOG, msg);
break;
default:
- dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
+ msg);
}
}
@@ -550,12 +566,12 @@ static void apple_rtkit_rx_work(struct work_struct *work)
kfree(rtk_work);
}
-static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
+static void apple_rtkit_rx(struct apple_mbox *mbox, struct apple_mbox_msg msg,
+ void *cookie)
{
- struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
- struct apple_mbox_msg *msg = mssg;
+ struct apple_rtkit *rtk = cookie;
struct apple_rtkit_rx_work *work;
- u8 ep = msg->msg1;
+ u8 ep = msg.msg1;
/*
* The message was read from a MMIO FIFO and we have to make
@@ -571,7 +587,7 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
rtk->ops->recv_message_early &&
- rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
+ rtk->ops->recv_message_early(rtk->cookie, ep, msg.msg0))
return;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
@@ -580,49 +596,31 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
work->rtk = rtk;
work->ep = ep;
- work->msg = msg->msg0;
+ work->msg = msg.msg0;
INIT_WORK(&work->work, apple_rtkit_rx_work);
queue_work(rtk->wq, &work->work);
}
-static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
-{
- struct apple_rtkit_msg *msg =
- container_of(mssg, struct apple_rtkit_msg, mbox_msg);
-
- if (r == -ETIME)
- return;
-
- if (msg->completion)
- complete(msg->completion);
- kfree(msg);
-}
-
int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
struct completion *completion, bool atomic)
{
- struct apple_rtkit_msg *msg;
- int ret;
- gfp_t flags;
+ struct apple_mbox_msg msg = {
+ .msg0 = message,
+ .msg1 = ep,
+ };
- if (rtk->crashed)
+ if (rtk->crashed) {
+ dev_warn(rtk->dev,
+ "RTKit: Device is crashed, cannot send message\n");
return -EINVAL;
+ }
+
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
- !apple_rtkit_is_running(rtk))
+ !apple_rtkit_is_running(rtk)) {
+ dev_warn(rtk->dev,
+ "RTKit: Endpoint 0x%02x is not running, cannot send message\n", ep);
return -EINVAL;
-
- if (atomic)
- flags = GFP_ATOMIC;
- else
- flags = GFP_KERNEL;
-
- msg = kzalloc(sizeof(*msg), flags);
- if (!msg)
- return -ENOMEM;
-
- msg->mbox_msg.msg0 = message;
- msg->mbox_msg.msg1 = ep;
- msg->completion = completion;
+ }
/*
* The message will be sent with a MMIO write. We need the barrier
@@ -631,51 +629,13 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
*/
dma_wmb();
- ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
- if (ret < 0) {
- kfree(msg);
- return ret;
- }
-
- return 0;
+ return apple_mbox_send(rtk->mbox, msg, atomic);
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
-int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
- unsigned long timeout, bool atomic)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
- int ret;
- long t;
-
- ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
- if (ret < 0)
- return ret;
-
- if (atomic) {
- ret = mbox_flush(rtk->mbox_chan, timeout);
- if (ret < 0)
- return ret;
-
- if (try_wait_for_completion(&completion))
- return 0;
-
- return -ETIME;
- } else {
- t = wait_for_completion_interruptible_timeout(
- &completion, msecs_to_jiffies(timeout));
- if (t < 0)
- return t;
- else if (t == 0)
- return -ETIME;
- return 0;
- }
-}
-EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
-
int apple_rtkit_poll(struct apple_rtkit *rtk)
{
- return mbox_client_peek_data(rtk->mbox_chan);
+ return apple_mbox_poll(rtk->mbox);
}
EXPORT_SYMBOL_GPL(apple_rtkit_poll);
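The two barrier comments above describe the ordering contract between shared-memory writes and the doorbell message. From a client's point of view that contract looks roughly like the sketch below; the endpoint number and request layout are assumptions, and the dma_wmb() relied on here is the one already issued inside apple_rtkit_send_message():

#define MY_APP_EP	0x20	/* hypothetical application endpoint */

static int my_coproc_kick(struct apple_rtkit *rtk, void *shmem,
			  const void *req, size_t len, u64 doorbell)
{
	/* plain stores to the shared buffer ... */
	memcpy(shmem, req, len);

	/*
	 * ... are ordered before the MMIO doorbell write by the dma_wmb()
	 * in apple_rtkit_send_message(), so the coprocessor never observes
	 * the doorbell without the data it refers to.
	 */
	return apple_rtkit_send_message(rtk, MY_APP_EP, doorbell, NULL, false);
}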
@@ -697,20 +657,6 @@ int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
}
EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
-static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
-{
- if (rtk->mbox_name)
- rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
- rtk->mbox_name);
- else
- rtk->mbox_chan =
- mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
-
- if (IS_ERR(rtk->mbox_chan))
- return PTR_ERR(rtk->mbox_chan);
- return 0;
-}
-
struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
const char *mbox_name, int mbox_idx,
const struct apple_rtkit_ops *ops)
@@ -736,22 +682,27 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
- rtk->mbox_name = mbox_name;
- rtk->mbox_idx = mbox_idx;
- rtk->mbox_cl.dev = dev;
- rtk->mbox_cl.tx_block = false;
- rtk->mbox_cl.knows_txdone = false;
- rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
- rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
+ if (mbox_name)
+ rtk->mbox = apple_mbox_get_byname(dev, mbox_name);
+ else
+ rtk->mbox = apple_mbox_get(dev, mbox_idx);
+
+ if (IS_ERR(rtk->mbox)) {
+ ret = PTR_ERR(rtk->mbox);
+ goto free_rtk;
+ }
+
+ rtk->mbox->rx = apple_rtkit_rx;
+ rtk->mbox->cookie = rtk;
- rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
dev_name(rtk->dev));
if (!rtk->wq) {
ret = -ENOMEM;
goto free_rtk;
}
- ret = apple_rtkit_request_mbox_chan(rtk);
+ ret = apple_mbox_start(rtk->mbox);
if (ret)
goto destroy_wq;
@@ -782,11 +733,12 @@ static int apple_rtkit_wait_for_completion(struct completion *c)
int apple_rtkit_reinit(struct apple_rtkit *rtk)
{
/* make sure we don't handle any messages while reinitializing */
- mbox_free_channel(rtk->mbox_chan);
+ apple_mbox_stop(rtk->mbox);
flush_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
@@ -806,7 +758,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
- return apple_rtkit_request_mbox_chan(rtk);
+ return apple_mbox_start(rtk->mbox);
}
EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
@@ -819,8 +771,10 @@ static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->ap_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
if (ret)
@@ -840,8 +794,10 @@ static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->iop_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
@@ -942,6 +898,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
int apple_rtkit_wake(struct apple_rtkit *rtk)
{
u64 msg;
+ int ret;
if (apple_rtkit_is_running(rtk))
return -EINVAL;
@@ -952,9 +909,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
* Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
* will wait for the completion anyway.
*/
- msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_INIT);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
return apple_rtkit_boot(rtk);
}
@@ -962,11 +921,12 @@ EXPORT_SYMBOL_GPL(apple_rtkit_wake);
void apple_rtkit_free(struct apple_rtkit *rtk)
{
- mbox_free_channel(rtk->mbox_chan);
+ apple_mbox_stop(rtk->mbox);
destroy_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
index afa111736899..9eaf3febb382 100644
--- a/drivers/soc/apple/sart.c
+++ b/drivers/soc/apple/sart.c
@@ -25,8 +25,17 @@
#define APPLE_SART_MAX_ENTRIES 16
-/* This is probably a bitfield but the exact meaning of each bit is unknown. */
-#define APPLE_SART_FLAGS_ALLOW 0xff
+/* SARTv0 registers */
+#define APPLE_SART0_CONFIG(idx) (0x00 + 4 * (idx))
+#define APPLE_SART0_CONFIG_FLAGS GENMASK(28, 24)
+#define APPLE_SART0_CONFIG_SIZE GENMASK(18, 0)
+#define APPLE_SART0_CONFIG_SIZE_SHIFT 12
+#define APPLE_SART0_CONFIG_SIZE_MAX GENMASK(18, 0)
+
+#define APPLE_SART0_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART0_PADDR_SHIFT 12
+
+#define APPLE_SART0_FLAGS_ALLOW 0xf
/* SARTv2 registers */
#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx))
@@ -38,6 +47,8 @@
#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
#define APPLE_SART2_PADDR_SHIFT 12
+#define APPLE_SART2_FLAGS_ALLOW 0xff
+
/* SARTv3 registers */
#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
@@ -48,11 +59,15 @@
#define APPLE_SART3_SIZE_SHIFT 12
#define APPLE_SART3_SIZE_MAX GENMASK(29, 0)
+#define APPLE_SART3_FLAGS_ALLOW 0xff
+
struct apple_sart_ops {
void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size);
void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
phys_addr_t paddr_shifted, size_t size_shifted);
+ /* This is probably a bitfield but the exact meaning of each bit is unknown. */
+ unsigned int flags_allow;
unsigned int size_shift;
unsigned int paddr_shift;
size_t size_max;
@@ -68,6 +83,39 @@ struct apple_sart {
unsigned long used_entries;
};
+static void sart0_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ u32 cfg = readl(sart->regs + APPLE_SART0_CONFIG(index));
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART0_PADDR(index));
+ size_t size_ = FIELD_GET(APPLE_SART0_CONFIG_SIZE, cfg);
+
+ *flags = FIELD_GET(APPLE_SART0_CONFIG_FLAGS, cfg);
+ *size = size_ << APPLE_SART0_CONFIG_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART0_PADDR_SHIFT;
+}
+
+static void sart0_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ u32 cfg;
+
+ cfg = FIELD_PREP(APPLE_SART0_CONFIG_FLAGS, flags);
+ cfg |= FIELD_PREP(APPLE_SART0_CONFIG_SIZE, size_shifted);
+
+ writel(paddr_shifted, sart->regs + APPLE_SART0_PADDR(index));
+ writel(cfg, sart->regs + APPLE_SART0_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v0 = {
+ .get_entry = sart0_get_entry,
+ .set_entry = sart0_set_entry,
+ .flags_allow = APPLE_SART0_FLAGS_ALLOW,
+ .size_shift = APPLE_SART0_CONFIG_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART0_PADDR_SHIFT,
+ .size_max = APPLE_SART0_CONFIG_SIZE_MAX,
+};
+
static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size)
{
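As a worked example of the SARTv0 layout above (illustrative numbers only; the pre-shifting by ops->paddr_shift/size_shift happens in the unchanged common code of this file): allowing 64 KiB at physical 0x8_0000_0000 through entry 0 ends up as PADDR(0) = 0x00800000 and CONFIG(0) = 0x0f000010:

static void sart0_entry_example(struct apple_sart *sart)
{
	phys_addr_t paddr = 0x800000000ULL;
	size_t size = 0x10000;

	sart0_set_entry(sart, 0, APPLE_SART0_FLAGS_ALLOW,
			paddr >> APPLE_SART0_PADDR_SHIFT,	/* 0x00800000 */
			size >> APPLE_SART0_CONFIG_SIZE_SHIFT);	/* 0x10 */
}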
@@ -95,6 +143,7 @@ static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
static struct apple_sart_ops sart_ops_v2 = {
.get_entry = sart2_get_entry,
.set_entry = sart2_set_entry,
+ .flags_allow = APPLE_SART2_FLAGS_ALLOW,
.size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
.paddr_shift = APPLE_SART2_PADDR_SHIFT,
.size_max = APPLE_SART2_CONFIG_SIZE_MAX,
@@ -122,6 +171,7 @@ static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
static struct apple_sart_ops sart_ops_v3 = {
.get_entry = sart3_get_entry,
.set_entry = sart3_set_entry,
+ .flags_allow = APPLE_SART3_FLAGS_ALLOW,
.size_shift = APPLE_SART3_SIZE_SHIFT,
.paddr_shift = APPLE_SART3_PADDR_SHIFT,
.size_max = APPLE_SART3_SIZE_MAX,
@@ -164,17 +214,11 @@ static int apple_sart_probe(struct platform_device *pdev)
return 0;
}
-static void apple_sart_put_device(void *dev)
-{
- put_device(dev);
-}
-
struct apple_sart *devm_apple_sart_get(struct device *dev)
{
struct device_node *sart_node;
struct platform_device *sart_pdev;
struct apple_sart *sart;
- int ret;
sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0);
if (!sart_node)
@@ -192,14 +236,11 @@ struct apple_sart *devm_apple_sart_get(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- ret = devm_add_action_or_reset(dev, apple_sart_put_device,
- &sart_pdev->dev);
- if (ret)
- return ERR_PTR(ret);
-
device_link_add(dev, &sart_pdev->dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+ put_device(&sart_pdev->dev);
+
return sart;
}
EXPORT_SYMBOL_GPL(devm_apple_sart_get);
@@ -233,7 +274,7 @@ int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
if (test_and_set_bit(i, &sart->used_entries))
continue;
- ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
+ ret = sart_set_entry(sart, i, sart->ops->flags_allow, paddr,
size);
if (ret) {
dev_dbg(sart->dev,
@@ -314,6 +355,10 @@ static const struct of_device_id apple_sart_of_match[] = {
.compatible = "apple,t8103-sart",
.data = &sart_ops_v2,
},
+ {
+ .compatible = "apple,t8015-sart",
+ .data = &sart_ops_v0,
+ },
{}
};
MODULE_DEVICE_TABLE(of, apple_sart_of_match);
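A short consumer-side sketch of the two exported helpers, assuming a device node that carries an "apple,sart" phandle and a DMA region whose physical address and size are already known; the client-side function name is made up:

static int my_client_open_sart_window(struct device *dev,
				      phys_addr_t paddr, size_t size)
{
	struct apple_sart *sart;

	sart = devm_apple_sart_get(dev);
	if (IS_ERR(sart))
		return PTR_ERR(sart);	/* may be -EPROBE_DEFER until the SART has probed */

	/* open a window so the coprocessor is allowed to DMA to this region */
	return apple_sart_add_allowed_region(sart, paddr, size);
}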
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 258894ed234b..b7dbb12bd095 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
@@ -254,17 +255,8 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, lpc_ctrl);
/* If memory-region is described in device tree then store */
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!node) {
- dev_dbg(dev, "Didn't find reserved memory\n");
- } else {
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for reserved memory\n");
- return -ENXIO;
- }
-
+ rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm);
+ if (!rc) {
lpc_ctrl->mem_size = resource_size(&resm);
lpc_ctrl->mem_base = resm.start;
@@ -332,14 +324,12 @@ err:
return rc;
}
-static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
+static void aspeed_lpc_ctrl_remove(struct platform_device *pdev)
{
struct aspeed_lpc_ctrl *lpc_ctrl = dev_get_drvdata(&pdev->dev);
misc_deregister(&lpc_ctrl->miscdev);
clk_disable_unprepare(lpc_ctrl->clk);
-
- return 0;
}
static const struct of_device_id aspeed_lpc_ctrl_match[] = {
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 773dbcbc03a6..b03310c0830d 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -12,6 +12,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/dev_printk.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
@@ -25,7 +26,6 @@
#define DEVICE_NAME "aspeed-lpc-snoop"
-#define NUM_SNOOP_CHANNELS 2
#define SNOOP_FIFO_SIZE 2048
#define HICR5 0x80
@@ -57,7 +57,23 @@ struct aspeed_lpc_snoop_model_data {
unsigned int has_hicrb_ensnp;
};
+enum aspeed_lpc_snoop_index {
+ ASPEED_LPC_SNOOP_INDEX_0 = 0,
+ ASPEED_LPC_SNOOP_INDEX_1 = 1,
+ ASPEED_LPC_SNOOP_INDEX_MAX = ASPEED_LPC_SNOOP_INDEX_1,
+};
+
+struct aspeed_lpc_snoop_channel_cfg {
+ enum aspeed_lpc_snoop_index index;
+ u32 hicr5_en;
+ u32 snpwadr_mask;
+ u32 snpwadr_shift;
+ u32 hicrb_en;
+};
+
struct aspeed_lpc_snoop_channel {
+ const struct aspeed_lpc_snoop_channel_cfg *cfg;
+ bool enabled;
struct kfifo fifo;
wait_queue_head_t wq;
struct miscdevice miscdev;
@@ -67,7 +83,24 @@ struct aspeed_lpc_snoop {
struct regmap *regmap;
int irq;
struct clk *clk;
- struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
+ struct aspeed_lpc_snoop_channel chan[ASPEED_LPC_SNOOP_INDEX_MAX + 1];
+};
+
+static const struct aspeed_lpc_snoop_channel_cfg channel_cfgs[ASPEED_LPC_SNOOP_INDEX_MAX + 1] = {
+ {
+ .index = ASPEED_LPC_SNOOP_INDEX_0,
+ .hicr5_en = HICR5_EN_SNP0W | HICR5_ENINT_SNP0W,
+ .snpwadr_mask = SNPWADR_CH0_MASK,
+ .snpwadr_shift = SNPWADR_CH0_SHIFT,
+ .hicrb_en = HICRB_ENSNP0D,
+ },
+ {
+ .index = ASPEED_LPC_SNOOP_INDEX_1,
+ .hicr5_en = HICR5_EN_SNP1W | HICR5_ENINT_SNP1W,
+ .snpwadr_mask = SNPWADR_CH1_MASK,
+ .snpwadr_shift = SNPWADR_CH1_SHIFT,
+ .hicrb_en = HICRB_ENSNP1D,
+ },
};
static struct aspeed_lpc_snoop_channel *snoop_file_to_chan(struct file *file)
@@ -166,7 +199,7 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
int rc;
lpc_snoop->irq = platform_get_irq(pdev, 0);
- if (!lpc_snoop->irq)
+ if (lpc_snoop->irq < 0)
return -ENODEV;
rc = devm_request_irq(dev, lpc_snoop->irq,
@@ -181,87 +214,88 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
return 0;
}
-static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
- struct device *dev,
- int channel, u16 lpc_port)
+__attribute__((nonnull))
+static int aspeed_lpc_enable_snoop(struct device *dev,
+ struct aspeed_lpc_snoop *lpc_snoop,
+ struct aspeed_lpc_snoop_channel *channel,
+ const struct aspeed_lpc_snoop_channel_cfg *cfg,
+ u16 lpc_port)
{
+ const struct aspeed_lpc_snoop_model_data *model_data;
int rc = 0;
- u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
- const struct aspeed_lpc_snoop_model_data *model_data =
- of_device_get_match_data(dev);
-
- init_waitqueue_head(&lpc_snoop->chan[channel].wq);
- /* Create FIFO datastructure */
- rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
- SNOOP_FIFO_SIZE, GFP_KERNEL);
+
+ if (WARN_ON(channel->enabled))
+ return -EBUSY;
+
+ init_waitqueue_head(&channel->wq);
+
+ channel->cfg = cfg;
+ channel->miscdev.minor = MISC_DYNAMIC_MINOR;
+ channel->miscdev.fops = &snoop_fops;
+ channel->miscdev.parent = dev;
+
+ channel->miscdev.name =
+ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, cfg->index);
+ if (!channel->miscdev.name)
+ return -ENOMEM;
+
+ rc = kfifo_alloc(&channel->fifo, SNOOP_FIFO_SIZE, GFP_KERNEL);
if (rc)
return rc;
- lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
- lpc_snoop->chan[channel].miscdev.name =
- devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
- lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
- lpc_snoop->chan[channel].miscdev.parent = dev;
- rc = misc_register(&lpc_snoop->chan[channel].miscdev);
+ rc = misc_register(&channel->miscdev);
if (rc)
- return rc;
+ goto err_free_fifo;
/* Enable LPC snoop channel at requested port */
- switch (channel) {
- case 0:
- hicr5_en = HICR5_EN_SNP0W | HICR5_ENINT_SNP0W;
- snpwadr_mask = SNPWADR_CH0_MASK;
- snpwadr_shift = SNPWADR_CH0_SHIFT;
- hicrb_en = HICRB_ENSNP0D;
- break;
- case 1:
- hicr5_en = HICR5_EN_SNP1W | HICR5_ENINT_SNP1W;
- snpwadr_mask = SNPWADR_CH1_MASK;
- snpwadr_shift = SNPWADR_CH1_SHIFT;
- hicrb_en = HICRB_ENSNP1D;
- break;
- default:
- return -EINVAL;
- }
+ regmap_set_bits(lpc_snoop->regmap, HICR5, cfg->hicr5_en);
+ regmap_update_bits(lpc_snoop->regmap, SNPWADR, cfg->snpwadr_mask,
+ lpc_port << cfg->snpwadr_shift);
+
+ model_data = of_device_get_match_data(dev);
+ if (model_data && model_data->has_hicrb_ensnp)
+ regmap_set_bits(lpc_snoop->regmap, HICRB, cfg->hicrb_en);
- regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
- regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask,
- lpc_port << snpwadr_shift);
- if (model_data->has_hicrb_ensnp)
- regmap_update_bits(lpc_snoop->regmap, HICRB,
- hicrb_en, hicrb_en);
+ channel->enabled = true;
+ return 0;
+
+err_free_fifo:
+ kfifo_free(&channel->fifo);
return rc;
}
+__attribute__((nonnull))
static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
- int channel)
+ struct aspeed_lpc_snoop_channel *channel)
{
- switch (channel) {
- case 0:
- regmap_update_bits(lpc_snoop->regmap, HICR5,
- HICR5_EN_SNP0W | HICR5_ENINT_SNP0W,
- 0);
- break;
- case 1:
- regmap_update_bits(lpc_snoop->regmap, HICR5,
- HICR5_EN_SNP1W | HICR5_ENINT_SNP1W,
- 0);
- break;
- default:
+ if (!channel->enabled)
return;
- }
- kfifo_free(&lpc_snoop->chan[channel].fifo);
- misc_deregister(&lpc_snoop->chan[channel].miscdev);
+ /* Disable interrupts along with the device */
+ regmap_clear_bits(lpc_snoop->regmap, HICR5, channel->cfg->hicr5_en);
+
+ channel->enabled = false;
+ /* Consider improving safety wrt concurrent reader(s) */
+ misc_deregister(&channel->miscdev);
+ kfifo_free(&channel->fifo);
+}
+
+static void aspeed_lpc_snoop_remove(struct platform_device *pdev)
+{
+ struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev);
+
+ /* Disable both snoop channels */
+ aspeed_lpc_disable_snoop(lpc_snoop, &lpc_snoop->chan[0]);
+ aspeed_lpc_disable_snoop(lpc_snoop, &lpc_snoop->chan[1]);
}
static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
{
struct aspeed_lpc_snoop *lpc_snoop;
- struct device *dev;
struct device_node *np;
- u32 port;
+ struct device *dev;
+ int idx;
int rc;
dev = &pdev->dev;
@@ -279,69 +313,40 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
}
lpc_snoop->regmap = syscon_node_to_regmap(np);
- if (IS_ERR(lpc_snoop->regmap)) {
- dev_err(dev, "Couldn't get regmap\n");
- return -ENODEV;
- }
+ if (IS_ERR(lpc_snoop->regmap))
+ return dev_err_probe(dev, PTR_ERR(lpc_snoop->regmap), "Couldn't get regmap\n");
dev_set_drvdata(&pdev->dev, lpc_snoop);
- rc = of_property_read_u32_index(dev->of_node, "snoop-ports", 0, &port);
- if (rc) {
- dev_err(dev, "no snoop ports configured\n");
- return -ENODEV;
- }
-
- lpc_snoop->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpc_snoop->clk)) {
- rc = PTR_ERR(lpc_snoop->clk);
- if (rc != -EPROBE_DEFER)
- dev_err(dev, "couldn't get clock\n");
- return rc;
- }
- rc = clk_prepare_enable(lpc_snoop->clk);
- if (rc) {
- dev_err(dev, "couldn't enable clock\n");
- return rc;
- }
+ lpc_snoop->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(lpc_snoop->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_snoop->clk), "couldn't get clock");
rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
if (rc)
- goto err;
-
- rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
- if (rc)
- goto err;
-
- /* Configuration of 2nd snoop channel port is optional */
- if (of_property_read_u32_index(dev->of_node, "snoop-ports",
- 1, &port) == 0) {
- rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
- if (rc) {
- aspeed_lpc_disable_snoop(lpc_snoop, 0);
- goto err;
- }
- }
-
- return 0;
+ return rc;
-err:
- clk_disable_unprepare(lpc_snoop->clk);
+ static_assert(ARRAY_SIZE(channel_cfgs) == ARRAY_SIZE(lpc_snoop->chan),
+ "Broken implementation assumption regarding cfg count");
+ for (idx = ASPEED_LPC_SNOOP_INDEX_0; idx <= ASPEED_LPC_SNOOP_INDEX_MAX; idx++) {
+ u32 port;
- return rc;
-}
+ rc = of_property_read_u32_index(dev->of_node, "snoop-ports", idx, &port);
+ if (rc)
+ break;
-static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
-{
- struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev);
+ rc = aspeed_lpc_enable_snoop(dev, lpc_snoop, &lpc_snoop->chan[idx],
+ &channel_cfgs[idx], port);
+ if (rc)
+ goto cleanup_channels;
+ }
- /* Disable both snoop channels */
- aspeed_lpc_disable_snoop(lpc_snoop, 0);
- aspeed_lpc_disable_snoop(lpc_snoop, 1);
+ return idx == ASPEED_LPC_SNOOP_INDEX_0 ? -ENODEV : 0;
- clk_disable_unprepare(lpc_snoop->clk);
+cleanup_channels:
+ aspeed_lpc_snoop_remove(pdev);
- return 0;
+ return rc;
}
static const struct aspeed_lpc_snoop_model_data ast2400_model_data = {
diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
index 548f44da28a9..3be2e1b1085b 100644
--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -334,7 +334,6 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
struct aspeed_p2a_ctrl *misc_ctrl;
struct device *dev;
struct resource resm;
- struct device_node *node;
int rc = 0;
dev = &pdev->dev;
@@ -346,15 +345,8 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
mutex_init(&misc_ctrl->tracking);
/* optional. */
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for reserved memory\n");
- return -ENODEV;
- }
-
+ rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm);
+ if (!rc) {
misc_ctrl->mem_size = resource_size(&resm);
misc_ctrl->mem_base = resm.start;
}
@@ -383,13 +375,11 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
return rc;
}
-static int aspeed_p2a_ctrl_remove(struct platform_device *pdev)
+static void aspeed_p2a_ctrl_remove(struct platform_device *pdev)
{
struct aspeed_p2a_ctrl *p2a_ctrl = dev_get_drvdata(&pdev->dev);
misc_deregister(&p2a_ctrl->miscdev);
-
- return 0;
}
#define SCU2C_DRAM BIT(25)
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
index 3f759121dc00..67e9ac3d08ec 100644
--- a/drivers/soc/aspeed/aspeed-socinfo.c
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -27,6 +27,10 @@ static struct {
{ "AST2620", 0x05010203 },
{ "AST2605", 0x05030103 },
{ "AST2625", 0x05030403 },
+ /* AST2700 */
+ { "AST2750", 0x06000003 },
+ { "AST2700", 0x06000103 },
+ { "AST2720", 0x06000203 },
};
static const char *siliconid_to_name(u32 siliconid)
diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
index 3a4c1f28cb34..0191e36e66e1 100644
--- a/drivers/soc/aspeed/aspeed-uart-routing.c
+++ b/drivers/soc/aspeed/aspeed-uart-routing.c
@@ -565,14 +565,12 @@ static int aspeed_uart_routing_probe(struct platform_device *pdev)
return 0;
}
-static int aspeed_uart_routing_remove(struct platform_device *pdev)
+static void aspeed_uart_routing_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct aspeed_uart_routing *uart_routing = platform_get_drvdata(pdev);
sysfs_remove_group(&dev->kobj, uart_routing->attr_grp);
-
- return 0;
}
static const struct of_device_id aspeed_uart_routing_table[] = {
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index cc9a3e107479..09347bccdb1d 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -101,6 +101,29 @@ static const struct at91_soc socs[] __initconst = {
AT91_CIDR_VERSION_MASK, SAM9X60_D6K_EXID_MATCH,
"sam9x60 8MiB SDRAM SiP", "sam9x60"),
#endif
+#ifdef CONFIG_SOC_SAM9X7
+ AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X70_EXID_MATCH,
+ "sam9x70", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X72_EXID_MATCH,
+ "sam9x72", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D1M_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75 16MB DDR2 SiP", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D5M_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75 64MB DDR2 SiP", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D1G_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+		 "sam9x75 125MB DDR3L SiP", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D2G_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75 250MB DDR3L SiP", "sam9x7"),
+#endif
#ifdef CONFIG_SOC_SAMA5
AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK, SAMA5D21CU_EXID_MATCH,
@@ -223,6 +246,9 @@ static const struct at91_soc socs[] __initconst = {
"samv70q19", "samv7"),
#endif
#ifdef CONFIG_SOC_SAMA7
+ AT91_SOC(SAMA7D65_CIDR_MATCH, AT91_CIDR_MASK_SAMA7G5,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7D65_EXID_MATCH,
+ "sama7d65", "sama7d6"),
AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G51_EXID_MATCH,
"sama7g51", "sama7g5"),
@@ -282,6 +308,7 @@ static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
void __iomem *regs;
static const struct of_device_id chipids[] = {
{ .compatible = "atmel,sama5d2-chipid" },
+ { .compatible = "microchip,sama7d65-chipid" },
{ .compatible = "microchip,sama7g5-chipid" },
{ },
};
@@ -370,13 +397,14 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
{ .compatible = "atmel,at91sam9", },
{ .compatible = "atmel,sama5", },
{ .compatible = "atmel,samv7", },
+ { .compatible = "microchip,sama7d65", },
{ .compatible = "microchip,sama7g5", },
{ }
};
static int __init atmel_soc_device_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
if (!of_match_node(at91_soc_allowed_list, np))
return 0;
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 7a9f47ce85fb..66a74017d9a3 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -44,6 +44,8 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9X5_CIDR_MATCH 0x019a05a0
#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
#define SAM9X60_CIDR_MATCH 0x019b35a0
+#define SAM9X7_CIDR_MATCH 0x09750020
+#define SAMA7D65_CIDR_MATCH 0x00262100
#define SAMA7G5_CIDR_MATCH 0x00162100
#define AT91SAM9M11_EXID_MATCH 0x00000001
@@ -66,6 +68,16 @@ at91_soc_init(const struct at91_soc *socs);
#define SAM9X60_D1G_EXID_MATCH 0x00000010
#define SAM9X60_D6K_EXID_MATCH 0x00000011
+#define SAM9X70_EXID_MATCH 0x00000005
+#define SAM9X72_EXID_MATCH 0x00000004
+#define SAM9X75_D1G_EXID_MATCH 0x00000018
+#define SAM9X75_D2G_EXID_MATCH 0x00000020
+#define SAM9X75_D1M_EXID_MATCH 0x00000003
+#define SAM9X75_D5M_EXID_MATCH 0x00000010
+#define SAM9X75_EXID_MATCH 0x00000000
+
+#define SAMA7D65_EXID_MATCH 0x00000080
+
#define SAMA7G51_EXID_MATCH 0x3
#define SAMA7G52_EXID_MATCH 0x2
#define SAMA7G53_EXID_MATCH 0x1
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index f96906795fa6..c921a22f6c11 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -1,39 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Broadcom SoC drivers"
-config BCM2835_POWER
- bool "BCM2835 power domain driver"
- depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
- default y if ARCH_BCM2835
- select PM_GENERIC_DOMAINS if PM
- select RESET_CONTROLLER
- help
- This enables support for the BCM2835 power domains and reset
- controller. Any usage of power domains by the Raspberry Pi
- firmware means that Linux usage of the same power domain
- must be accessed using the RASPBERRYPI_POWER driver
-
-config RASPBERRYPI_POWER
- bool "Raspberry Pi power domain driver"
- depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
- depends on RASPBERRYPI_FIRMWARE=y
- select PM_GENERIC_DOMAINS if PM
- help
- This enables support for the RPi power domains which can be enabled
- or disabled via the RPi firmware.
-
-config SOC_BCM63XX
- bool "Broadcom 63xx SoC drivers"
- depends on BMIPS_GENERIC || COMPILE_TEST
- help
- Enables drivers for the Broadcom 63xx series of chips.
- Drivers can be enabled individually within this menu.
-
- If unsure, say N.
-
config SOC_BRCMSTB
bool "Broadcom STB SoC drivers"
- depends on ARM || ARM64 || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
select SOC_BUS
help
Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
@@ -42,27 +12,6 @@ config SOC_BRCMSTB
If unsure, say N.
-config BCM_PMB
- bool "Broadcom PMB (Power Management Bus) driver"
- depends on ARCH_BCMBCA || (COMPILE_TEST && OF)
- default ARCH_BCMBCA
- select PM_GENERIC_DOMAINS if PM
- help
- This enables support for the Broadcom's PMB (Power Management Bus) that
- is used for disabling and enabling SoC devices.
-
-if SOC_BCM63XX
-
-config BCM63XX_POWER
- bool "BCM63xx power domain driver"
- depends on BMIPS_GENERIC || (COMPILE_TEST && OF)
- select PM_GENERIC_DOMAINS if PM
- help
- This enables support for the BCM63xx power domains controller on
- BCM6318, BCM6328, BCM6362 and BCM63268 SoCs.
-
-endif # SOC_BCM63XX
-
source "drivers/soc/bcm/brcmstb/Kconfig"
endmenu
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 364ddbe365c2..bd830649b60d 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -298,7 +298,7 @@ out:
#ifdef CONFIG_PM_SLEEP
static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS];
-static int brcmstb_cpu_credit_reg_suspend(void)
+static int brcmstb_cpu_credit_reg_suspend(void *data)
{
unsigned int i;
@@ -311,7 +311,7 @@ static int brcmstb_cpu_credit_reg_suspend(void)
return 0;
}
-static void brcmstb_cpu_credit_reg_resume(void)
+static void brcmstb_cpu_credit_reg_resume(void *data)
{
unsigned int i;
@@ -322,10 +322,14 @@ static void brcmstb_cpu_credit_reg_resume(void)
cbc_writel(cpubiuctrl_reg_save[i], i);
}
-static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
+static const struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
.suspend = brcmstb_cpu_credit_reg_suspend,
.resume = brcmstb_cpu_credit_reg_resume,
};
+
+static struct syscore brcmstb_cpu_credit_syscore = {
+ .ops = &brcmstb_cpu_credit_syscore_ops,
+};
#endif
@@ -354,7 +358,7 @@ static int __init brcmstb_biuctrl_init(void)
a72_b53_rac_enable_all(np);
mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
- register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
+ register_syscore(&brcmstb_cpu_credit_syscore);
#endif
ret = 0;
out_put:
diff --git a/drivers/soc/bcm/brcmstb/pm/pm.h b/drivers/soc/bcm/brcmstb/pm/pm.h
index 94a380470a2f..17f7a06a7a83 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm.h
+++ b/drivers/soc/bcm/brcmstb/pm/pm.h
@@ -60,7 +60,7 @@
PM_DEEP_STANDBY | \
PM_PLL_PWRDOWN | PM_PWR_DOWN)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef CONFIG_MIPS
extern const unsigned long brcmstb_pm_do_s2_sz;
diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig
index 43ced2bf8444..3121d351fea6 100644
--- a/drivers/soc/canaan/Kconfig
+++ b/drivers/soc/canaan/Kconfig
@@ -2,9 +2,9 @@
config SOC_K210_SYSCTL
bool "Canaan Kendryte K210 SoC system controller"
- depends on RISCV && SOC_CANAAN && OF
+ depends on RISCV && SOC_CANAAN_K210 && OF
depends on COMMON_CLK_K210
- default SOC_CANAAN
+ default SOC_CANAAN_K210
select PM
select MFD_SYSCON
help
diff --git a/drivers/soc/cirrus/Kconfig b/drivers/soc/cirrus/Kconfig
new file mode 100644
index 000000000000..d8b3b1e68998
--- /dev/null
+++ b/drivers/soc/cirrus/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_EP93XX
+
+config EP93XX_SOC
+ bool "Cirrus EP93xx chips SoC"
+ select SOC_BUS
+ select AUXILIARY_BUS
+ default y
+ help
+	  Enable SoC support for Cirrus EP93xx chips.
+
+	  Cirrus EP93xx chips have several swlocked registers;
+	  this driver provides locked access for the reset, pinctrl
+	  and clk devices, which are implemented as auxiliary devices.
+
+endif
diff --git a/drivers/soc/cirrus/Makefile b/drivers/soc/cirrus/Makefile
new file mode 100644
index 000000000000..9e6608b67f76
--- /dev/null
+++ b/drivers/soc/cirrus/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += soc-ep93xx.o
diff --git a/drivers/soc/cirrus/soc-ep93xx.c b/drivers/soc/cirrus/soc-ep93xx.c
new file mode 100644
index 000000000000..3e79b3b13aef
--- /dev/null
+++ b/drivers/soc/cirrus/soc-ep93xx.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SoC driver for Cirrus EP93xx chips.
+ * Copyright (C) 2022 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Based on a rewrite of arch/arm/mach-ep93xx/core.c
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * Thanks go to Michael Burian and Ray Lehtiniemi for their key
+ * role in the ep93xx Linux community.
+ */
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+
+#define EP93XX_SYSCON_DEVCFG 0x80
+
+#define EP93XX_SWLOCK_MAGICK 0xaa
+#define EP93XX_SYSCON_SWLOCK 0xc0
+#define EP93XX_SYSCON_SYSCFG 0x9c
+#define EP93XX_SYSCON_SYSCFG_REV_MASK GENMASK(31, 28)
+#define EP93XX_SYSCON_SYSCFG_REV_SHIFT 28
+
+struct ep93xx_map_info {
+ spinlock_t lock;
+ void __iomem *base;
+ struct regmap *map;
+};
+
+/*
+ * EP93xx System Controller software locked register write
+ *
+ * Logic safeguards are included to condition the control signals for
+ * power connection to the matrix to prevent part damage. In addition, a
+ * software lock register is included that must be written with 0xAA
+ * before each register write to change the values of the four switch
+ * matrix control registers.
+ */
+static void ep93xx_regmap_write(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int val)
+{
+ guard(spinlock_irqsave)(lock);
+
+ regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
+ regmap_write(map, reg, val);
+}
+
+static void ep93xx_regmap_update_bits(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ guard(spinlock_irqsave)(lock);
+
+ regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
+ /* force write is required to clear swlock if no changes are made */
+ regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
+static void ep93xx_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static void ep93xx_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+
+ kfree(rdev);
+}
+
+static struct auxiliary_device __init *ep93xx_adev_alloc(struct device *parent,
+ const char *name,
+ struct ep93xx_map_info *info)
+{
+ struct ep93xx_regmap_adev *rdev __free(kfree) = NULL;
+ struct auxiliary_device *adev;
+ int ret;
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev)
+ return ERR_PTR(-ENOMEM);
+
+ rdev->map = info->map;
+ rdev->base = info->base;
+ rdev->lock = &info->lock;
+ rdev->write = ep93xx_regmap_write;
+ rdev->update_bits = ep93xx_regmap_update_bits;
+
+ adev = &rdev->adev;
+ adev->name = name;
+ adev->dev.parent = parent;
+ adev->dev.release = ep93xx_adev_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &no_free_ptr(rdev)->adev;
+}
+
+static int __init ep93xx_controller_register(struct device *parent, const char *name,
+ struct ep93xx_map_info *info)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = ep93xx_adev_alloc(parent, name, info);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, ep93xx_unregister_adev, adev);
+}
+
+static unsigned int __init ep93xx_soc_revision(struct regmap *map)
+{
+ unsigned int val;
+
+ regmap_read(map, EP93XX_SYSCON_SYSCFG, &val);
+ val &= EP93XX_SYSCON_SYSCFG_REV_MASK;
+ val >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT;
+ return val;
+}
+
+static const char __init *ep93xx_get_soc_rev(unsigned int rev)
+{
+ switch (rev) {
+ case EP93XX_CHIP_REV_D0:
+ return "D0";
+ case EP93XX_CHIP_REV_D1:
+ return "D1";
+ case EP93XX_CHIP_REV_E0:
+ return "E0";
+ case EP93XX_CHIP_REV_E1:
+ return "E1";
+ case EP93XX_CHIP_REV_E2:
+ return "E2";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *pinctrl_names[] __initconst = {
+ "pinctrl-ep9301", /* EP93XX_9301_SOC */
+ "pinctrl-ep9307", /* EP93XX_9307_SOC */
+ "pinctrl-ep9312", /* EP93XX_9312_SOC */
+};
+
+static int __init ep93xx_syscon_probe(struct platform_device *pdev)
+{
+ enum ep93xx_soc_model model;
+ struct ep93xx_map_info *map_info;
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ struct device *dev = &pdev->dev;
+ struct regmap *map;
+ void __iomem *base;
+ unsigned int rev;
+ int ret;
+
+ model = (enum ep93xx_soc_model)(uintptr_t)device_get_match_data(dev);
+
+ map = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ attrs = devm_kzalloc(dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ rev = ep93xx_soc_revision(map);
+
+ attrs->machine = of_flat_dt_get_machine_name();
+ attrs->family = "Cirrus Logic EP93xx";
+ attrs->revision = ep93xx_get_soc_rev(rev);
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ map_info = devm_kzalloc(dev, sizeof(*map_info), GFP_KERNEL);
+ if (!map_info)
+ return -ENOMEM;
+
+ spin_lock_init(&map_info->lock);
+ map_info->map = map;
+ map_info->base = base;
+
+ ret = ep93xx_controller_register(dev, pinctrl_names[model], map_info);
+ if (ret)
+ dev_err(dev, "registering pinctrl controller failed\n");
+
+ /*
+ * EP93xx SSP clock rate was doubled in version E2. For more information
+ * see section 6 "2x SSP (Synchronous Serial Port) Clock – Revision E2 only":
+ * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
+ */
+ if (rev == EP93XX_CHIP_REV_E2)
+ ret = ep93xx_controller_register(dev, "clk-ep93xx.e2", map_info);
+ else
+ ret = ep93xx_controller_register(dev, "clk-ep93xx", map_info);
+ if (ret)
+ dev_err(dev, "registering clock controller failed\n");
+
+ ret = ep93xx_controller_register(dev, "reset-ep93xx", map_info);
+ if (ret)
+ dev_err(dev, "registering reset controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id ep9301_syscon_of_device_ids[] = {
+ { .compatible = "cirrus,ep9301-syscon", .data = (void *)EP93XX_9301_SOC },
+ { .compatible = "cirrus,ep9302-syscon", .data = (void *)EP93XX_9301_SOC },
+ { .compatible = "cirrus,ep9307-syscon", .data = (void *)EP93XX_9307_SOC },
+ { .compatible = "cirrus,ep9312-syscon", .data = (void *)EP93XX_9312_SOC },
+ { .compatible = "cirrus,ep9315-syscon", .data = (void *)EP93XX_9312_SOC },
+ { /* sentinel */ }
+};
+
+static struct platform_driver ep9301_syscon_driver = {
+ .driver = {
+ .name = "ep9301-syscon",
+ .of_match_table = ep9301_syscon_of_device_ids,
+ },
+};
+builtin_platform_driver_probe(ep9301_syscon_driver, ep93xx_syscon_probe);
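The swlock comment above is the contract the handed-down write/update_bits helpers implement. A minimal sketch of how one of the auxiliary drivers (reset, pinctrl or clk) is expected to consume them; the register and bit used here are purely illustrative:

static int ep93xx_aux_probe_example(struct auxiliary_device *adev,
				    const struct auxiliary_device_id *id)
{
	struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);

	/*
	 * Every locked write goes through the parent's helper, which takes
	 * the shared spinlock and writes 0xAA to SWLOCK immediately before
	 * touching the target register, as the hardware requires.
	 */
	rdev->update_bits(rdev->map, rdev->lock, EP93XX_SYSCON_DEVCFG,
			  BIT(0), BIT(0));

	return 0;
}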
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index ffc5311c0ed8..7bbd3f940e4d 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -257,10 +257,9 @@ static void pmu_irq_handler(struct irq_desc *desc)
* So, let's structure the code so that the window is as small as
* possible.
*/
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
done &= readl_relaxed(base + PMC_IRQ_CAUSE);
writel_relaxed(done, base + PMC_IRQ_CAUSE);
- irq_gc_unlock(gc);
}
static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
@@ -274,8 +273,8 @@ static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
writel(0, pmu->pmc_base + PMC_IRQ_MASK);
writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
- domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(pmu->of_node), NR_PMU_IRQS,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("%s: unable to add irq domain\n", name);
return -ENOMEM;
@@ -410,13 +409,16 @@ int __init dove_init_pmu(void)
struct pmu_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (!domain)
+ if (!domain) {
+ of_node_put(np);
break;
+ }
domain->pmu = pmu;
domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
if (!domain->base.name) {
kfree(domain);
+ of_node_put(np);
break;
}
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index fcec6ed83d5e..47870e29c290 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -22,7 +22,7 @@ config FSL_GUTS
config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
- depends on FSL_MC_BUS
+ depends on FSL_MC_BUS && NET
select SOC_BUS
select FSL_GUTS
select DIMLIB
@@ -36,7 +36,7 @@ config FSL_MC_DPIO
config DPAA2_CONSOLE
tristate "QorIQ DPAA2 console driver"
depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
- default y
+ default ARCH_LAYERSCAPE
help
Console driver for DPAA2 platforms. Exports 2 char devices,
/dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
index 1dca693b6b38..6310f54e68a2 100644
--- a/drivers/soc/fsl/dpaa2-console.c
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -300,12 +300,10 @@ err_register_mc:
return error;
}
-static int dpaa2_console_remove(struct platform_device *pdev)
+static void dpaa2_console_remove(struct platform_device *pdev)
{
misc_deregister(&dpaa2_mc_console_dev);
misc_deregister(&dpaa2_aiop_console_dev);
-
- return 0;
}
static const struct of_device_id dpaa2_console_match_table[] = {
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 1d2b27e3ea63..0b60ed16297c 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
struct qbman_eq_desc *ed;
int i, ret;
- ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
+ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
if (!ed)
return -ENOMEM;
@@ -891,7 +891,7 @@ void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
d->frames += frames;
dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
- net_dim(&d->rx_dim, dim_sample);
+ net_dim(&d->rx_dim, &dim_sample);
spin_unlock(&d->dim_lock);
}
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index bdecb86bb656..27774ec6ff90 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig FSL_DPAA
bool "QorIQ DPAA1 framework support"
- depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT)
+ depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT)
select GENERIC_ALLOCATOR
help
The Freescale Data Path Acceleration Architecture (DPAA) is a set of
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index cb24a08be084..b0f26f6f731e 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -144,17 +144,6 @@ static int bm_set_memory(u64 ba, u32 size)
static dma_addr_t fbpr_a;
static size_t fbpr_sz;
-static int bman_fbpr(struct reserved_mem *rmem)
-{
- fbpr_a = rmem->base;
- fbpr_sz = rmem->size;
-
- WARN_ON(!(fbpr_a && fbpr_sz));
-
- return 0;
-}
-RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
-
static irqreturn_t bman_isr(int irq, void *ptr)
{
u32 isr_val, ier_val, ecsr_val, isr_mask, i;
@@ -242,17 +231,11 @@ static int fsl_bman_probe(struct platform_device *pdev)
return -ENODEV;
}
- /*
- * If FBPR memory wasn't defined using the qbman compatible string
- * try using the of_reserved_mem_device method
- */
- if (!fbpr_a) {
- ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
- if (ret) {
- dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
- ret);
- return -ENODEV;
- }
+ ret = qbman_init_private_mem(dev, 0, "fsl,bman-fbpr", &fbpr_a, &fbpr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+ ret);
+ return -ENODEV;
}
dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 33751450047e..e1d7b79cc450 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -34,8 +34,8 @@
/*
* Initialize a devices private memory region
*/
-int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
- size_t *size)
+int qbman_init_private_mem(struct device *dev, int idx, const char *compat,
+ dma_addr_t *addr, size_t *size)
{
struct device_node *mem_node;
struct reserved_mem *rmem;
@@ -44,8 +44,12 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
if (!mem_node) {
- dev_err(dev, "No memory-region found for index %d\n", idx);
- return -ENODEV;
+ mem_node = of_find_compatible_node(NULL, NULL, compat);
+ if (!mem_node) {
+ dev_err(dev, "No memory-region found for index %d or compatible '%s'\n",
+ idx, compat);
+ return -ENODEV;
+ }
}
rmem = of_reserved_mem_lookup(mem_node);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index ae8afa552b1e..16485bde9636 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -101,8 +101,8 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
#define DPAA_GENALLOC_OFF 0x80000000
/* Initialize the devices private memory region */
-int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
- size_t *size);
+int qbman_init_private_mem(struct device *dev, int idx, const char *compat,
+ dma_addr_t *addr, size_t *size);
/* memremap() attributes for different platforms */
#ifdef CONFIG_PPC
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 739e4eee6b75..6b392b3ad4b1 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -991,7 +991,7 @@ struct qman_portal {
/* linked-list of CSCN handlers. */
struct list_head cgr_cbs;
/* list lock */
- spinlock_t cgr_lock;
+ raw_spinlock_t cgr_lock;
struct work_struct congestion_work;
struct work_struct mr_work;
char irqname[MAX_IRQNAME];
@@ -1073,7 +1073,7 @@ EXPORT_SYMBOL(qman_portal_set_iperiod);
int qman_wq_alloc(void)
{
- qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", WQ_PERCPU, 1);
if (!qm_portal_wq)
return -ENOMEM;
return 0;
@@ -1270,7 +1270,7 @@ static int qman_create_portal(struct qman_portal *portal,
qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
- portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+ portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL);
if (!portal->cgrs)
goto fail_cgrs;
/* initial snapshot is no-depletion */
@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
/* if the given mask is NULL, assume all CGRs can be seen */
qman_cgrs_fill(&portal->cgrs[0]);
INIT_LIST_HEAD(&portal->cgr_cbs);
- spin_lock_init(&portal->cgr_lock);
+ raw_spin_lock_init(&portal->cgr_lock);
INIT_WORK(&portal->congestion_work, qm_congestion_task);
INIT_WORK(&portal->mr_work, qm_mr_process_task);
portal->bits = 0;
@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
union qm_mc_result *mcr;
struct qman_cgr *cgr;
- spin_lock(&p->cgr_lock);
+ /*
+ * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
+ */
+ raw_spin_lock_irq(&p->cgr_lock);
qm_mc_start(&p->p);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
return;
@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
list_for_each_entry(cgr, &p->cgr_cbs, node)
if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}
@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
preempt_enable();
cgr->chan = p->config->channel;
- spin_lock(&p->cgr_lock);
+ raw_spin_lock_irq(&p->cgr_lock);
if (opts) {
struct qm_mcc_initcgr local_opts = *opts;
@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
cgr->cb(p, cgr, 1);
out:
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
put_affine_portal();
return ret;
}
@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
return -EINVAL;
memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
list_del(&cgr->node);
/*
* If there are no other CGR objects for this CGRID in the list,
@@ -2537,17 +2540,12 @@ int qman_delete_cgr(struct qman_cgr *cgr)
/* add back to the list */
list_add(&cgr->node, &p->cgr_cbs);
release_lock:
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);
-struct cgr_comp {
- struct qman_cgr *cgr;
- struct completion completion;
-};
-
static void qman_delete_cgr_smp_call(void *p)
{
qman_delete_cgr((struct qman_cgr *)p);
@@ -2577,9 +2575,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
if (!p)
return -EINVAL;
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
ret = qm_modify_cgr(cgr, 0, opts);
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 157659fd033a..aa5348f4902f 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -468,28 +468,6 @@ static int zero_priv_mem(phys_addr_t addr, size_t sz)
return 0;
}
-
-static int qman_fqd(struct reserved_mem *rmem)
-{
- fqd_a = rmem->base;
- fqd_sz = rmem->size;
-
- WARN_ON(!(fqd_a && fqd_sz));
- return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-
-static int qman_pfdr(struct reserved_mem *rmem)
-{
- pfdr_a = rmem->base;
- pfdr_sz = rmem->size;
-
- WARN_ON(!(pfdr_a && pfdr_sz));
-
- return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
-
#endif
unsigned int qm_get_fqid_maxcnt(void)
@@ -796,39 +774,32 @@ static int fsl_qman_probe(struct platform_device *pdev)
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
}
- if (fqd_a) {
+ /*
+	 * The order of memory regions is assumed to be FQD followed by PFDR.
+	 * To ensure allocations come from the correct regions, the driver
+	 * initializes and then allocates each piece in order.
+ */
+ ret = qbman_init_private_mem(dev, 0, "fsl,qman-fqd", &fqd_a, &fqd_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
#ifdef CONFIG_PPC
- /*
- * For PPC backward DT compatibility
- * FQD memory MUST be zero'd by software
- */
- zero_priv_mem(fqd_a, fqd_sz);
-#else
- WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+ /*
+ * For PPC backward DT compatibility
+ * FQD memory MUST be zero'd by software
+ */
+ zero_priv_mem(fqd_a, fqd_sz);
#endif
- } else {
- /*
- * Order of memory regions is assumed as FQD followed by PFDR
- * in order to ensure allocations from the correct regions the
- * driver initializes then allocates each piece in order
- */
- ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
- if (ret) {
- dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
- ret);
- return -ENODEV;
- }
- }
dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
- if (!pfdr_a) {
- /* Setup PFDR memory */
- ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
- if (ret) {
- dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
- ret);
- return -ENODEV;
- }
+ /* Setup PFDR memory */
+ ret = qbman_init_private_mem(dev, 1, "fsl,qman-pfdr", &pfdr_a, &pfdr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+ ret);
+ return -ENODEV;
}
dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index e23b60618c1a..456ef5d5c199 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -48,9 +48,10 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
struct device *dev = pcfg->dev;
int ret;
- pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
- if (!pcfg->iommu_domain) {
+ pcfg->iommu_domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(pcfg->iommu_domain)) {
dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ pcfg->iommu_domain = NULL;
goto no_iommu;
}
ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
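
The portal change above follows the newer IOMMU API, in which iommu_paging_domain_alloc() reports failure with an ERR_PTR value rather than NULL, so the caller resets the pointer before falling back. A hedged sketch of that error-handling convention, with hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical wrapper: treat allocation failure as "run without IOMMU". */
static struct iommu_domain *foo_alloc_domain(struct device *dev)
{
	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);

	if (IS_ERR(domain)) {
		dev_warn(dev, "no paging domain (%pe), continuing without IOMMU\n",
			 domain);
		return NULL;
	}
	return domain;
}
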
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index b7e8e5ec884c..6009e8b32c44 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -103,19 +103,17 @@ static int on_all_cpus(int (*fn)(void))
{
int cpu;
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
struct bstrap bstrap = {
.fn = fn,
.started = ATOMIC_INIT(0)
};
- struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
- "hotpotato%d", cpu);
+ struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
+ cpu, "hotpotato%d");
int ret;
if (IS_ERR(k))
return -ENOMEM;
- kthread_bind(k, cpu);
- wake_up_process(k);
/*
* If we call kthread_stop() before the "wake up" has had an
* effect, then the thread may exit with -EINTR without ever
@@ -221,7 +219,7 @@ static int allocate_frame_data(void)
pcfg = qman_get_qm_portal_config(qman_dma_portal);
- __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+ __frame_ptr = kmalloc_array(4, HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
return -ENOMEM;
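
kthread_run_on_cpu(), used above, combines kthread_create(), kthread_bind() and wake_up_process() into a single call; the name format string still receives the CPU number. A small hypothetical sketch of the same pattern:

#include <linux/err.h>
#include <linux/kthread.h>

static int foo_thread_fn(void *data)
{
	/* ... per-CPU work driven by @data ... */
	return 0;
}

/* Create a worker, bind it to @cpu and wake it in one step. */
static int foo_start_worker(unsigned int cpu, void *data)
{
	struct task_struct *k = kthread_run_on_cpu(foo_thread_fn, data,
						   cpu, "foo/%u");

	return IS_ERR(k) ? PTR_ERR(k) : 0;
}
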
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index fa9ffbed0e92..eb03f42ab978 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -17,7 +17,7 @@ config QUICC_ENGINE
config UCC_SLOW
bool
- default y if SERIAL_QE
+ default y if SERIAL_QE || (CPM_QMC && QUICC_ENGINE)
help
This option provides qe_lib support to UCC slow
protocols: UART, BISYNC, QMC
@@ -31,26 +31,27 @@ config UCC_FAST
config UCC
bool
- default y if UCC_FAST || UCC_SLOW
+ default y if UCC_FAST || UCC_SLOW || (CPM_TSA && QUICC_ENGINE)
config CPM_TSA
- tristate "CPM TSA support"
+ tristate "CPM/QE TSA support"
depends on OF && HAS_IOMEM
- depends on CPM1 || (CPM && COMPILE_TEST)
+ depends on CPM1 || QUICC_ENGINE || \
+ ((CPM || QUICC_ENGINE) && COMPILE_TEST)
help
- Freescale CPM Time Slot Assigner (TSA)
+ Freescale CPM/QE Time Slot Assigner (TSA)
controller.
This option enables support for this
controller
config CPM_QMC
- tristate "CPM QMC support"
+ tristate "CPM/QE QMC support"
depends on OF && HAS_IOMEM
- depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST)
+ depends on FSL_SOC
depends on CPM_TSA
help
- Freescale CPM QUICC Multichannel Controller
+ Freescale CPM/QE QUICC Multichannel Controller
(QMC)
This option enables support for this
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 3ef24ba0245b..c54154b404df 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -12,18 +12,19 @@
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/property.h>
+#include <linux/platform_device.h>
#include <soc/fsl/qe/qe.h>
+#define PIN_MASK(gpio) (1UL << (QE_PIO_PINS - 1 - (gpio)))
+
struct qe_gpio_chip {
- struct of_mm_gpio_chip mm_gc;
+ struct gpio_chip gc;
+ void __iomem *regs;
spinlock_t lock;
/* shadowed data register to clear/set bits safely */
@@ -33,11 +34,9 @@ struct qe_gpio_chip {
struct qe_pio_regs saved_regs;
};
-static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void qe_gpio_save_regs(struct qe_gpio_chip *qe_gc)
{
- struct qe_gpio_chip *qe_gc =
- container_of(mm_gc, struct qe_gpio_chip, mm_gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
qe_gc->cpdata = ioread32be(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
@@ -50,20 +49,19 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
- u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
+ u32 pin_mask = PIN_MASK(gpio);
return !!(ioread32be(&regs->cpdata) & pin_mask);
}
-static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
- u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+ u32 pin_mask = PIN_MASK(gpio);
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -75,14 +73,15 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
}
-static void qe_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int qe_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
int i;
@@ -93,26 +92,27 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc,
break;
if (__test_and_clear_bit(i, mask)) {
if (test_bit(i, bits))
- qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ qe_gc->cpdata |= PIN_MASK(i);
else
- qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ qe_gc->cpdata &= ~PIN_MASK(i);
}
}
iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
}
static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&qe_gc->lock, flags);
- __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+ __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
spin_unlock_irqrestore(&qe_gc->lock, flags);
@@ -121,7 +121,6 @@ static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
@@ -129,7 +128,7 @@ static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&qe_gc->lock, flags);
- __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+ __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
spin_unlock_irqrestore(&qe_gc->lock, flags);
@@ -235,7 +234,7 @@ EXPORT_SYMBOL(qe_pin_free);
void qe_pin_set_dedicated(struct qe_pin *qe_pin)
{
struct qe_gpio_chip *qe_gc = qe_pin->controller;
- struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
struct qe_pio_regs *sregs = &qe_gc->saved_regs;
int pin = qe_pin->num;
u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
@@ -264,7 +263,6 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
iowrite32be(qe_gc->cpdata, &regs->cpdata);
qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
-
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
EXPORT_SYMBOL(qe_pin_set_dedicated);
@@ -279,7 +277,7 @@ EXPORT_SYMBOL(qe_pin_set_dedicated);
void qe_pin_set_gpio(struct qe_pin *qe_pin)
{
struct qe_gpio_chip *qe_gc = qe_pin->controller;
- struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -291,45 +289,62 @@ void qe_pin_set_gpio(struct qe_pin *qe_pin)
}
EXPORT_SYMBOL(qe_pin_set_gpio);
-static int __init qe_add_gpiochips(void)
+static int qe_gpio_probe(struct platform_device *ofdev)
{
- struct device_node *np;
-
- for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
- int ret;
- struct qe_gpio_chip *qe_gc;
- struct of_mm_gpio_chip *mm_gc;
- struct gpio_chip *gc;
-
- qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
- if (!qe_gc) {
- ret = -ENOMEM;
- goto err;
- }
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
+ struct qe_gpio_chip *qe_gc;
+ struct gpio_chip *gc;
- spin_lock_init(&qe_gc->lock);
-
- mm_gc = &qe_gc->mm_gc;
- gc = &mm_gc->gc;
-
- mm_gc->save_regs = qe_gpio_save_regs;
- gc->ngpio = QE_PIO_PINS;
- gc->direction_input = qe_gpio_dir_in;
- gc->direction_output = qe_gpio_dir_out;
- gc->get = qe_gpio_get;
- gc->set = qe_gpio_set;
- gc->set_multiple = qe_gpio_set_multiple;
-
- ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
- if (ret)
- goto err;
- continue;
-err:
- pr_err("%pOF: registration failed with status %d\n",
- np, ret);
- kfree(qe_gc);
- /* try others anyway */
- }
- return 0;
+ qe_gc = devm_kzalloc(dev, sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&qe_gc->lock);
+
+ gc = &qe_gc->gc;
+
+ gc->base = -1;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ qe_gc->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qe_gc->regs))
+ return PTR_ERR(qe_gc->regs);
+
+ qe_gpio_save_regs(qe_gc);
+
+ return devm_gpiochip_add_data(dev, gc, qe_gc);
+}
+
+static const struct of_device_id qe_gpio_match[] = {
+ {
+ .compatible = "fsl,mpc8323-qe-pario-bank",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qe_gpio_match);
+
+static struct platform_driver qe_gpio_driver = {
+ .probe = qe_gpio_probe,
+ .driver = {
+ .name = "qe-gpio",
+ .of_match_table = qe_gpio_match,
+ },
+};
+
+static int __init qe_gpio_init(void)
+{
+ return platform_driver_register(&qe_gpio_driver);
}
-arch_initcall(qe_add_gpiochips);
+arch_initcall(qe_gpio_init);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 3ee0c7c1e9a4..70b6eddb867b 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -430,7 +430,7 @@ static void qe_upload_microcode(const void *base,
/*
* Upload a microcode to the I-RAM at a specific address.
*
- * See Documentation/powerpc/qe_firmware.rst for information on QE microcode
+ * See Documentation/arch/powerpc/qe_firmware.rst for information on QE microcode
* uploading.
*
* Currently, only version 1 is supported, so the 'version' field must be
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 9729ce86db59..02c29f5f86d3 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -13,6 +13,7 @@
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
+#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -141,7 +142,7 @@ static s32 cpm_muram_alloc_common(unsigned long size,
*
* This function returns a non-negative offset into the muram area, or
* a negative errno on failure.
- * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
s32 cpm_muram_alloc(unsigned long size, unsigned long align)
@@ -187,13 +188,56 @@ void cpm_muram_free(s32 offset)
}
EXPORT_SYMBOL(cpm_muram_free);
+static void devm_cpm_muram_release(struct device *dev, void *res)
+{
+ s32 *info = res;
+
+ cpm_muram_free(*info);
+}
+
+/**
+ * devm_cpm_muram_alloc - Resource-managed cpm_muram_alloc
+ * @dev: Device to allocate memory for
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure as cpm_muram_alloc() does.
+ * Use cpm_muram_addr() to get the virtual address of the area.
+ *
+ * Unlike cpm_muram_alloc(), the memory allocated by this resource-managed
+ * version is automatically freed on driver detach, so cpm_muram_free()
+ * must not be called to release the allocated memory.
+ */
+s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align)
+{
+ s32 info;
+ s32 *dr;
+
+ dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ info = cpm_muram_alloc(size, align);
+ if (info >= 0) {
+ *dr = info;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return info;
+}
+EXPORT_SYMBOL(devm_cpm_muram_alloc);
+
/*
* cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
* @offset: offset of allocation start address
* @size: number of bytes to allocate
* This function returns @offset if the area was available, a negative
* errno otherwise.
- * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
@@ -212,6 +256,42 @@ s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
/**
+ * devm_cpm_muram_alloc_fixed - Resource-managed cpm_muram_alloc_fixed
+ * @dev: Device to allocate memory for
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure as cpm_muram_alloc_fixed() does.
+ * Use cpm_muram_addr() to get the virtual address of the area.
+ *
+ * Unlike cpm_muram_alloc_fixed(), the memory allocated by this
+ * resource-managed version is automatically freed on driver detach, so
+ * cpm_muram_free() must not be called to release the allocated memory.
+ */
+s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
+ unsigned long size)
+{
+ s32 info;
+ s32 *dr;
+
+ dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ info = cpm_muram_alloc_fixed(offset, size);
+ if (info >= 0) {
+ *dr = info;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return info;
+}
+EXPORT_SYMBOL(devm_cpm_muram_alloc_fixed);
+
+/**
* cpm_muram_addr - turn a muram offset into a virtual address
* @offset: muram offset to convert
*/
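
As the kernel-doc above describes, the devres wrappers return a muram offset that is released automatically on driver detach. A short, hypothetical probe-time usage sketch (size, alignment and names are illustrative):

#include <linux/device.h>
#include <soc/fsl/qe/qe.h>

static int foo_map_muram(struct device *dev, void __iomem **pram)
{
	s32 offset;

	/* 64 bytes, 32-byte aligned; freed automatically on detach. */
	offset = devm_cpm_muram_alloc(dev, 64, 32);
	if (offset < 0)
		return offset;		/* negative errno */

	*pram = cpm_muram_addr(offset);	/* no cpm_muram_free() needed */
	return 0;
}
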
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index bbae3d39c7be..943911053af6 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -232,11 +232,6 @@ static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
iowrite32be(value, base + (reg >> 2));
}
-static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
-{
- return irq_get_chip_data(virq);
-}
-
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
return irq_data_get_irq_chip_data(d);
@@ -344,7 +339,7 @@ static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
if (irq == 0)
return 0;
- return irq_linear_revmap(qe_ic->irqhost, irq);
+ return irq_find_mapping(qe_ic->irqhost, irq);
}
/* Return an interrupt vector or 0 if no interrupt is pending. */
@@ -360,7 +355,7 @@ static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
if (irq == 0)
return 0;
- return irq_linear_revmap(qe_ic->irqhost, irq);
+ return irq_find_mapping(qe_ic->irqhost, irq);
}
static void qe_ic_cascade_low(struct irq_desc *desc)
@@ -412,7 +407,6 @@ static int qe_ic_init(struct platform_device *pdev)
void (*high_handler)(struct irq_desc *desc);
struct qe_ic *qe_ic;
struct resource *res;
- struct device_node *node = pdev->dev.of_node;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
@@ -446,8 +440,8 @@ static int qe_ic_init(struct platform_device *pdev)
high_handler = NULL;
}
- qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
- &qe_ic_host_ops, qe_ic);
+ qe_ic->irqhost = irq_domain_create_linear(dev_fwnode(&pdev->dev), NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
if (qe_ic->irqhost == NULL) {
dev_err(dev, "failed to add irq domain\n");
return -ENODEV;
@@ -455,13 +449,11 @@ static int qe_ic_init(struct platform_device *pdev)
qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
- irq_set_handler_data(qe_ic->virq_low, qe_ic);
- irq_set_chained_handler(qe_ic->virq_low, low_handler);
+ irq_set_chained_handler_and_data(qe_ic->virq_low, low_handler, qe_ic);
- if (high_handler) {
- irq_set_handler_data(qe_ic->virq_high, qe_ic);
- irq_set_chained_handler(qe_ic->virq_high, high_handler);
- }
+ if (high_handler)
+ irq_set_chained_handler_and_data(qe_ic->virq_high,
+ high_handler, qe_ic);
return 0;
}
static const struct of_device_id qe_ic_ids[] = {
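
The interrupt-controller changes above replace the of_node-based irq_domain_add_linear() with irq_domain_create_linear() on the device fwnode, and fold the chained-handler and handler-data setup into irq_set_chained_handler_and_data(). A hedged sketch of that setup, with hypothetical names and a stub ops table:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static const struct irq_domain_ops foo_domain_ops = {
	/* .map / .xlate callbacks elided in this sketch */
};

static int foo_setup_irqs(struct platform_device *pdev, void *priv,
			  unsigned int parent_virq,
			  void (*flow_handler)(struct irq_desc *desc))
{
	struct irq_domain *d;

	d = irq_domain_create_linear(dev_fwnode(&pdev->dev), 64,
				     &foo_domain_ops, priv);
	if (!d)
		return -ENODEV;

	/* One call installs both the chained handler and its data. */
	irq_set_chained_handler_and_data(parent_virq, flow_handler, priv);
	return 0;
}
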
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index b3c292c9a14e..da5ea6d35618 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -8,7 +8,9 @@
*/
#include <soc/fsl/qe/qmc.h>
+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -18,31 +20,41 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
+#include <soc/fsl/qe/ucc_slow.h>
+#include <soc/fsl/qe/qe.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"
-/* SCC general mode register high (32 bits) */
+/* SCC general mode register low (32 bits) (GUMR_L in QE) */
#define SCC_GSMRL 0x00
-#define SCC_GSMRL_ENR (1 << 5)
-#define SCC_GSMRL_ENT (1 << 4)
-#define SCC_GSMRL_MODE_QMC (0x0A << 0)
+#define SCC_GSMRL_ENR BIT(5)
+#define SCC_GSMRL_ENT BIT(4)
+#define SCC_GSMRL_MODE_MASK GENMASK(3, 0)
+#define SCC_CPM1_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x0A)
+#define SCC_QE_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x02)
-/* SCC general mode register low (32 bits) */
+/* SCC general mode register high (32 bits) (identical to GUMR_H in QE) */
#define SCC_GSMRH 0x04
-#define SCC_GSMRH_CTSS (1 << 7)
-#define SCC_GSMRH_CDS (1 << 8)
-#define SCC_GSMRH_CTSP (1 << 9)
-#define SCC_GSMRH_CDP (1 << 10)
-
-/* SCC event register (16 bits) */
+#define SCC_GSMRH_CTSS BIT(7)
+#define SCC_GSMRH_CDS BIT(8)
+#define SCC_GSMRH_CTSP BIT(9)
+#define SCC_GSMRH_CDP BIT(10)
+#define SCC_GSMRH_TTX BIT(11)
+#define SCC_GSMRH_TRX BIT(12)
+
+/* SCC event register (16 bits) (identical to UCCE in QE) */
#define SCC_SCCE 0x10
-#define SCC_SCCE_IQOV (1 << 3)
-#define SCC_SCCE_GINT (1 << 2)
-#define SCC_SCCE_GUN (1 << 1)
-#define SCC_SCCE_GOV (1 << 0)
+#define SCC_SCCE_IQOV BIT(3)
+#define SCC_SCCE_GINT BIT(2)
+#define SCC_SCCE_GUN BIT(1)
+#define SCC_SCCE_GOV BIT(0)
/* SCC mask register (16 bits) */
#define SCC_SCCM 0x14
+
+/* UCC Extended Mode Register (8 bits, QE only) */
+#define SCC_QE_UCC_GUEMR 0x90
+
/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE 0x00
/* Multichannel controller state (16 bits) */
@@ -73,27 +85,42 @@
#define QMC_GBL_TSATTX 0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16 0xA0
+/* Rx framer base pointer (16 bits, QE only) */
+#define QMC_QE_GBL_RX_FRM_BASE 0xAC
+/* Tx framer base pointer (16 bits, QE only) */
+#define QMC_QE_GBL_TX_FRM_BASE 0xAE
+/* A reserved area (0xB0 -> 0xC3) that must be initialized to 0 (QE only) */
+#define QMC_QE_GBL_RSV_B0_START 0xB0
+#define QMC_QE_GBL_RSV_B0_SIZE 0x14
+/* QMC Global Channel specific base (32 bits, QE only) */
+#define QMC_QE_GBL_GCSBASE 0xC4
/* TSA entry (16bit entry in TSATRX and TSATTX) */
-#define QMC_TSA_VALID (1 << 15)
-#define QMC_TSA_WRAP (1 << 14)
-#define QMC_TSA_MASK (0x303F)
-#define QMC_TSA_CHANNEL(x) ((x) << 6)
+#define QMC_TSA_VALID BIT(15)
+#define QMC_TSA_WRAP BIT(14)
+#define QMC_TSA_MASK_MASKH GENMASK(13, 12)
+#define QMC_TSA_MASK_MASKL GENMASK(5, 0)
+#define QMC_TSA_MASK_8BIT (FIELD_PREP_CONST(QMC_TSA_MASK_MASKH, 0x3) | \
+ FIELD_PREP_CONST(QMC_TSA_MASK_MASKL, 0x3F))
+#define QMC_TSA_CHANNEL_MASK GENMASK(11, 6)
+#define QMC_TSA_CHANNEL(x) FIELD_PREP(QMC_TSA_CHANNEL_MASK, x)
/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE 0x00
/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR 0x02
-#define QMC_SPE_CHAMR_MODE_HDLC (1 << 15)
-#define QMC_SPE_CHAMR_MODE_TRANSP ((0 << 15) | (1 << 13))
-#define QMC_SPE_CHAMR_ENT (1 << 12)
-#define QMC_SPE_CHAMR_POL (1 << 8)
-#define QMC_SPE_CHAMR_HDLC_IDLM (1 << 13)
-#define QMC_SPE_CHAMR_HDLC_CRC (1 << 7)
-#define QMC_SPE_CHAMR_HDLC_NOF (0x0f << 0)
-#define QMC_SPE_CHAMR_TRANSP_RD (1 << 14)
-#define QMC_SPE_CHAMR_TRANSP_SYNC (1 << 10)
+#define QMC_SPE_CHAMR_MODE_MASK GENMASK(15, 15)
+#define QMC_SPE_CHAMR_MODE_HDLC FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 1)
+#define QMC_SPE_CHAMR_MODE_TRANSP (FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 0) | BIT(13))
+#define QMC_SPE_CHAMR_ENT BIT(12)
+#define QMC_SPE_CHAMR_POL BIT(8)
+#define QMC_SPE_CHAMR_HDLC_IDLM BIT(13)
+#define QMC_SPE_CHAMR_HDLC_CRC BIT(7)
+#define QMC_SPE_CHAMR_HDLC_NOF_MASK GENMASK(3, 0)
+#define QMC_SPE_CHAMR_HDLC_NOF(x) FIELD_PREP(QMC_SPE_CHAMR_HDLC_NOF_MASK, x)
+#define QMC_SPE_CHAMR_TRANSP_RD BIT(14)
+#define QMC_SPE_CHAMR_TRANSP_SYNC BIT(10)
/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE 0x04
@@ -120,43 +147,47 @@
/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC 0x3C
-#define QMC_SPE_TRNSYNC_RX(x) ((x) << 8)
-#define QMC_SPE_TRNSYNC_TX(x) ((x) << 0)
+#define QMC_SPE_TRNSYNC_RX_MASK GENMASK(15, 8)
+#define QMC_SPE_TRNSYNC_RX(x) FIELD_PREP(QMC_SPE_TRNSYNC_RX_MASK, x)
+#define QMC_SPE_TRNSYNC_TX_MASK GENMASK(7, 0)
+#define QMC_SPE_TRNSYNC_TX(x) FIELD_PREP(QMC_SPE_TRNSYNC_TX_MASK, x)
/* Interrupt related registers bits */
-#define QMC_INT_V (1 << 15)
-#define QMC_INT_W (1 << 14)
-#define QMC_INT_NID (1 << 13)
-#define QMC_INT_IDL (1 << 12)
-#define QMC_INT_GET_CHANNEL(x) (((x) & 0x0FC0) >> 6)
-#define QMC_INT_MRF (1 << 5)
-#define QMC_INT_UN (1 << 4)
-#define QMC_INT_RXF (1 << 3)
-#define QMC_INT_BSY (1 << 2)
-#define QMC_INT_TXB (1 << 1)
-#define QMC_INT_RXB (1 << 0)
+#define QMC_INT_V BIT(15)
+#define QMC_INT_W BIT(14)
+#define QMC_INT_NID BIT(13)
+#define QMC_INT_IDL BIT(12)
+#define QMC_INT_CHANNEL_MASK GENMASK(11, 6)
+#define QMC_INT_GET_CHANNEL(x) FIELD_GET(QMC_INT_CHANNEL_MASK, x)
+#define QMC_INT_MRF BIT(5)
+#define QMC_INT_UN BIT(4)
+#define QMC_INT_RXF BIT(3)
+#define QMC_INT_BSY BIT(2)
+#define QMC_INT_TXB BIT(1)
+#define QMC_INT_RXB BIT(0)
/* BD related registers bits */
-#define QMC_BD_RX_E (1 << 15)
-#define QMC_BD_RX_W (1 << 13)
-#define QMC_BD_RX_I (1 << 12)
-#define QMC_BD_RX_L (1 << 11)
-#define QMC_BD_RX_F (1 << 10)
-#define QMC_BD_RX_CM (1 << 9)
-#define QMC_BD_RX_UB (1 << 7)
-#define QMC_BD_RX_LG (1 << 5)
-#define QMC_BD_RX_NO (1 << 4)
-#define QMC_BD_RX_AB (1 << 3)
-#define QMC_BD_RX_CR (1 << 2)
-
-#define QMC_BD_TX_R (1 << 15)
-#define QMC_BD_TX_W (1 << 13)
-#define QMC_BD_TX_I (1 << 12)
-#define QMC_BD_TX_L (1 << 11)
-#define QMC_BD_TX_TC (1 << 10)
-#define QMC_BD_TX_CM (1 << 9)
-#define QMC_BD_TX_UB (1 << 7)
-#define QMC_BD_TX_PAD (0x0f << 0)
+#define QMC_BD_RX_E BIT(15)
+#define QMC_BD_RX_W BIT(13)
+#define QMC_BD_RX_I BIT(12)
+#define QMC_BD_RX_L BIT(11)
+#define QMC_BD_RX_F BIT(10)
+#define QMC_BD_RX_CM BIT(9)
+#define QMC_BD_RX_UB BIT(7)
+#define QMC_BD_RX_LG BIT(5)
+#define QMC_BD_RX_NO BIT(4)
+#define QMC_BD_RX_AB BIT(3)
+#define QMC_BD_RX_CR BIT(2)
+
+#define QMC_BD_TX_R BIT(15)
+#define QMC_BD_TX_W BIT(13)
+#define QMC_BD_TX_I BIT(12)
+#define QMC_BD_TX_L BIT(11)
+#define QMC_BD_TX_TC BIT(10)
+#define QMC_BD_TX_CM BIT(9)
+#define QMC_BD_TX_UB BIT(7)
+#define QMC_BD_TX_PAD_MASK GENMASK(3, 0)
+#define QMC_BD_TX_PAD(x) FIELD_PREP(QMC_BD_TX_PAD_MASK, x)
/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS 8
@@ -166,7 +197,7 @@
struct qmc_xfer_desc {
union {
void (*tx_complete)(void *context);
- void (*rx_complete)(void *context, size_t length);
+ void (*rx_complete)(void *context, size_t length, unsigned int flags);
};
void *context;
};
@@ -175,13 +206,16 @@ struct qmc_chan {
struct list_head list;
unsigned int id;
struct qmc *qmc;
- void *__iomem s_param;
+ void __iomem *s_param;
enum qmc_mode mode;
+ spinlock_t ts_lock; /* Protect timeslots */
+ u64 tx_ts_mask_avail;
u64 tx_ts_mask;
+ u64 rx_ts_mask_avail;
u64 rx_ts_mask;
bool is_reverse_data;
- spinlock_t tx_lock;
+ spinlock_t tx_lock; /* Protect Tx related data */
cbd_t __iomem *txbds;
cbd_t __iomem *txbd_free;
cbd_t __iomem *txbd_done;
@@ -189,7 +223,7 @@ struct qmc_chan {
u64 nb_tx_underrun;
bool is_tx_stopped;
- spinlock_t rx_lock;
+ spinlock_t rx_lock; /* Protect Rx related data */
cbd_t __iomem *rxbds;
cbd_t __iomem *rxbd_free;
cbd_t __iomem *rxbd_done;
@@ -200,13 +234,31 @@ struct qmc_chan {
bool is_rx_stopped;
};
+enum qmc_version {
+ QMC_CPM1,
+ QMC_QE,
+};
+
+struct qmc_data {
+ enum qmc_version version;
+ u32 tstate; /* Initial TSTATE value */
+ u32 rstate; /* Initial RSTATE value */
+ u32 zistate; /* Initial ZISTATE value */
+ u32 zdstate_hdlc; /* Initial ZDSTATE value (HDLC mode) */
+ u32 zdstate_transp; /* Initial ZDSTATE value (Transparent mode) */
+ u32 rpack; /* Initial RPACK value */
+};
+
struct qmc {
struct device *dev;
+ const struct qmc_data *data;
struct tsa_serial *tsa_serial;
- void *__iomem scc_regs;
- void *__iomem scc_pram;
- void *__iomem dpram;
+ void __iomem *scc_regs;
+ void __iomem *scc_pram;
+ void __iomem *dpram;
u16 scc_pram_offset;
+ u32 dpram_offset;
+ u32 qe_subblock;
cbd_t __iomem *bd_table;
dma_addr_t bd_dma_addr;
size_t bd_size;
@@ -214,49 +266,68 @@ struct qmc {
u16 __iomem *int_curr;
dma_addr_t int_dma_addr;
size_t int_size;
+ bool is_tsa_64rxtx;
struct list_head chan_head;
struct qmc_chan *chans[64];
};
-static inline void qmc_write16(void *__iomem addr, u16 val)
+static void qmc_write8(void __iomem *addr, u8 val)
+{
+ iowrite8(val, addr);
+}
+
+static void qmc_write16(void __iomem *addr, u16 val)
{
iowrite16be(val, addr);
}
-static inline u16 qmc_read16(void *__iomem addr)
+static u16 qmc_read16(void __iomem *addr)
{
return ioread16be(addr);
}
-static inline void qmc_setbits16(void *__iomem addr, u16 set)
+static void qmc_setbits16(void __iomem *addr, u16 set)
{
qmc_write16(addr, qmc_read16(addr) | set);
}
-static inline void qmc_clrbits16(void *__iomem addr, u16 clr)
+static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
qmc_write16(addr, qmc_read16(addr) & ~clr);
}
-static inline void qmc_write32(void *__iomem addr, u32 val)
+static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
+{
+ qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
+}
+
+static void qmc_write32(void __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}
-static inline u32 qmc_read32(void *__iomem addr)
+static u32 qmc_read32(void __iomem *addr)
{
return ioread32be(addr);
}
-static inline void qmc_setbits32(void *__iomem addr, u32 set)
+static void qmc_setbits32(void __iomem *addr, u32 set)
{
qmc_write32(addr, qmc_read32(addr) | set);
}
+static bool qmc_is_qe(const struct qmc *qmc)
+{
+ if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
+ return qmc->data->version == QMC_QE;
+
+ return IS_ENABLED(CONFIG_QUICC_ENGINE);
+}
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
struct tsa_serial_info tsa_info;
+ unsigned long flags;
int ret;
/* Retrieve info from the TSA related serial */
@@ -264,6 +335,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
if (ret)
return ret;
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
info->mode = chan->mode;
info->rx_fs_rate = tsa_info.rx_fs_rate;
info->rx_bit_rate = tsa_info.rx_bit_rate;
@@ -272,10 +345,63 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
info->tx_bit_rate = tsa_info.tx_bit_rate;
info->nb_rx_ts = hweight64(chan->rx_ts_mask);
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);
+int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
+ ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
+ ts_info->rx_ts_mask = chan->rx_ts_mask;
+ ts_info->tx_ts_mask = chan->tx_ts_mask;
+
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_get_ts_info);
+
+int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Only a subset of available timeslots is allowed */
+ if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
+ return -EINVAL;
+ if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
+ return -EINVAL;
+
+ /* In case of common rx/tx table, rx/tx masks must be identical */
+ if (chan->qmc->is_tsa_64rxtx) {
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
+ (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
+ dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
+ ret = -EBUSY;
+ } else {
+ chan->tx_ts_mask = ts_info->tx_ts_mask;
+ chan->rx_ts_mask = ts_info->rx_ts_mask;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_set_ts_info);
+
int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
if (param->mode != chan->mode)
@@ -283,8 +409,8 @@ int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param
switch (param->mode) {
case QMC_HDLC:
- if ((param->hdlc.max_rx_buf_size % 4) ||
- (param->hdlc.max_rx_buf_size < 8))
+ if (param->hdlc.max_rx_buf_size % 4 ||
+ param->hdlc.max_rx_buf_size < 8)
return -EINVAL;
qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
@@ -318,7 +444,7 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
int ret;
@@ -335,9 +461,16 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
ctrl = qmc_read16(&bd->cbd_sc);
if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
- /* We are full ... */
- ret = -EBUSY;
- goto end;
+ if (!(ctrl & (QMC_BD_TX_R | QMC_BD_TX_I)) && bd == chan->txbd_done) {
+ if (ctrl & QMC_BD_TX_W)
+ chan->txbd_done = chan->txbds;
+ else
+ chan->txbd_done++;
+ } else {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
}
qmc_write16(&bd->cbd_datlen, length);
@@ -349,6 +482,10 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
/* Activate the descriptor */
ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
+ if (complete)
+ ctrl |= QMC_BD_TX_I;
+ else
+ ctrl &= ~QMC_BD_TX_I;
wmb(); /* Be sure to flush the descriptor before control update */
qmc_write16(&bd->cbd_sc, ctrl);
@@ -374,7 +511,7 @@ static void qmc_chan_write_done(struct qmc_chan *chan)
void (*complete)(void *context);
unsigned long flags;
void *context;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
/*
@@ -421,11 +558,12 @@ end:
}
int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
- void (*complete)(void *context, size_t length), void *context)
+ void (*complete)(void *context, size_t length, unsigned int flags),
+ void *context)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
int ret;
@@ -442,9 +580,16 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
ctrl = qmc_read16(&bd->cbd_sc);
if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
- /* We are full ... */
- ret = -EBUSY;
- goto end;
+ if (!(ctrl & (QMC_BD_RX_E | QMC_BD_RX_I)) && bd == chan->rxbd_done) {
+ if (ctrl & QMC_BD_RX_W)
+ chan->rxbd_done = chan->rxbds;
+ else
+ chan->rxbd_done++;
+ } else {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
}
qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
@@ -454,19 +599,28 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
xfer_desc->rx_complete = complete;
xfer_desc->context = context;
+ /* Clear previous status flags */
+ ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
+ QMC_BD_RX_AB | QMC_BD_RX_CR);
+
/* Activate the descriptor */
ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
+ if (complete)
+ ctrl |= QMC_BD_RX_I;
+ else
+ ctrl &= ~QMC_BD_RX_I;
wmb(); /* Be sure to flush data before descriptor activation */
qmc_write16(&bd->cbd_sc, ctrl);
/* Restart receiver if needed */
if (chan->is_rx_halted && !chan->is_rx_stopped) {
/* Restart receiver */
- if (chan->mode == QMC_TRANSPARENT)
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
- else
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
+ chan->mode == QMC_TRANSPARENT ?
+ chan->qmc->data->zdstate_transp :
+ chan->qmc->data->zdstate_hdlc);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
chan->is_rx_halted = false;
}
chan->rx_pending++;
@@ -485,10 +639,10 @@ EXPORT_SYMBOL(qmc_chan_read_submit);
static void qmc_chan_read_done(struct qmc_chan *chan)
{
- void (*complete)(void *context, size_t size);
+ void (*complete)(void *context, size_t size, unsigned int flags);
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
void *context;
u16 datalen;
u16 ctrl;
@@ -527,7 +681,23 @@ static void qmc_chan_read_done(struct qmc_chan *chan)
if (complete) {
spin_unlock_irqrestore(&chan->rx_lock, flags);
- complete(context, datalen);
+
+ /*
+ * Avoid conversion between internal hardware flags and
+ * the software API flags.
+ * -> Be sure that the software API flags are consistent
+ * with the hardware flags
+ */
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);
+
+ complete(context, datalen,
+ ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
+ QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
spin_lock_irqsave(&chan->rx_lock, flags);
}
@@ -539,11 +709,167 @@ end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
}
-static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
+static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /*
+ * Use a common Tx/Rx 64 entries table.
+ * Tx and Rx related stuffs must be identical
+	 * Tx and Rx related settings must be identical.
+ if (chan->tx_ts_mask != chan->rx_ts_mask) {
+ dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
+
+	/* Check entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+	/* Set entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a Rx 32 entries table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a Tx 32 entries table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Tx stuff */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Tx stuff */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
+ (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32tx(chan, &info, enable);
+}
+
+static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32rx(chan, &info, enable);
+}
+
+static int qmc_chan_cpm1_command(struct qmc_chan *chan, u8 qmc_opcode)
{
return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}
+static int qmc_chan_qe_command(struct qmc_chan *chan, u32 cmd)
+{
+ if (!qe_issue_cmd(cmd, chan->qmc->qe_subblock, chan->id, 0))
+ return -EIO;
+ return 0;
+}
+
static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
unsigned long flags;
@@ -551,8 +877,16 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (chan->is_rx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP RECEIVE command */
- ret = qmc_chan_command(chan, 0x0);
+ ret = qmc_is_qe(chan->qmc) ?
+ qmc_chan_qe_command(chan, QE_QMC_STOP_RX) :
+ qmc_chan_cpm1_command(chan, 0x0);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
chan->id, ret);
@@ -561,6 +895,15 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
chan->is_rx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
+ ret = qmc_chan_setup_tsa_rx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
return ret;
@@ -573,8 +916,16 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (chan->is_tx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP TRANSMIT command */
- ret = qmc_chan_command(chan, 0x1);
+ ret = qmc_is_qe(chan->qmc) ?
+ qmc_chan_qe_command(chan, QE_QMC_STOP_TX) :
+ qmc_chan_cpm1_command(chan, 0x1);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
chan->id, ret);
@@ -583,56 +934,171 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
chan->is_tx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
+ ret = qmc_chan_setup_tsa_tx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
return ret;
}
+static int qmc_chan_start_rx(struct qmc_chan *chan);
+
int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
- int ret;
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = !chan->is_rx_stopped;
ret = qmc_chan_stop_rx(chan);
if (ret)
- return ret;
+ goto end;
}
if (direction & QMC_CHAN_WRITE) {
ret = qmc_chan_stop_tx(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /* Restart rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_start_rx(chan);
+ goto end;
+ }
}
- return 0;
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);
-static void qmc_chan_start_rx(struct qmc_chan *chan)
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
+{
+ struct tsa_serial_info info;
+ unsigned int w_rx, w_tx;
+ u16 first_rx, last_tx;
+ u16 trnsync;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ w_rx = hweight64(chan->rx_ts_mask);
+ w_tx = hweight64(chan->tx_ts_mask);
+ if (w_rx <= 1 && w_tx <= 1) {
+ dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n");
+ qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
+ return 0;
+ }
+
+ /* Find the first Rx TS allocated to the channel */
+ first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
+
+ /* Find the last Tx TS allocated to the channel */
+ last_tx = fls64(chan->tx_ts_mask);
+
+ trnsync = 0;
+ if (info.nb_rx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
+ if (info.nb_tx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
+
+ qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
+
+ dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
+ chan->id, trnsync,
+ first_rx, info.nb_rx_ts, chan->rx_ts_mask,
+ last_tx, info.nb_tx_ts, chan->tx_ts_mask);
+
+ return 0;
+}
+
+static int qmc_chan_start_rx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (!chan->is_rx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_rx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ if (chan->mode == QMC_TRANSPARENT) {
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
/* Restart the receiver */
- if (chan->mode == QMC_TRANSPARENT)
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
- else
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
+ chan->mode == QMC_TRANSPARENT ?
+ chan->qmc->data->zdstate_transp :
+ chan->qmc->data->zdstate_hdlc);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
chan->is_rx_halted = false;
chan->is_rx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
}
-static void qmc_chan_start_tx(struct qmc_chan *chan)
+static int qmc_chan_start_tx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (!chan->is_tx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_tx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ if (chan->mode == QMC_TRANSPARENT) {
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
/*
* Enable channel transmitter as it could be disabled if
* qmc_chan_reset() was called.
@@ -644,18 +1110,39 @@ static void qmc_chan_start_tx(struct qmc_chan *chan)
chan->is_tx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
}
int qmc_chan_start(struct qmc_chan *chan, int direction)
{
- if (direction & QMC_CHAN_READ)
- qmc_chan_start_rx(chan);
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
- if (direction & QMC_CHAN_WRITE)
- qmc_chan_start_tx(chan);
+ spin_lock_irqsave(&chan->ts_lock, flags);
- return 0;
+ if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = chan->is_rx_stopped;
+ ret = qmc_chan_start_rx(chan);
+ if (ret)
+ goto end;
+ }
+
+ if (direction & QMC_CHAN_WRITE) {
+ ret = qmc_chan_start_tx(chan);
+ if (ret) {
+ /* Restop rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_stop_rx(chan);
+ goto end;
+ }
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_start);
@@ -663,7 +1150,7 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
spin_lock_irqsave(&chan->rx_lock, flags);
@@ -685,7 +1172,6 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
qmc_read16(chan->s_param + QMC_SPE_RBASE));
chan->rx_pending = 0;
- chan->is_rx_stopped = false;
spin_unlock_irqrestore(&chan->rx_lock, flags);
}
@@ -694,7 +1180,7 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
spin_lock_irqsave(&chan->tx_lock, flags);
@@ -720,8 +1206,8 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
qmc_read16(chan->s_param + QMC_SPE_TBASE));
/* Reset TSTATE and ZISTATE to their initial value */
- qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
- qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
spin_unlock_irqrestore(&chan->tx_lock, flags);
}
@@ -741,10 +1227,7 @@ EXPORT_SYMBOL(qmc_chan_reset);
static int qmc_check_chans(struct qmc *qmc)
{
struct tsa_serial_info info;
- bool is_one_table = false;
struct qmc_chan *chan;
- u64 tx_ts_mask = 0;
- u64 rx_ts_mask = 0;
u64 tx_ts_assigned_mask;
u64 rx_ts_assigned_mask;
int ret;
@@ -754,7 +1237,7 @@ static int qmc_check_chans(struct qmc *qmc)
if (ret)
return ret;
- if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
+ if (info.nb_tx_ts > 64 || info.nb_rx_ts > 64) {
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
return -EINVAL;
}
@@ -763,43 +1246,26 @@ static int qmc_check_chans(struct qmc *qmc)
* If more than 32 TS are assigned to this serial, one common table is
* used for Tx and Rx and so masks must be equal for all channels.
*/
- if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
+ if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32) {
if (info.nb_tx_ts != info.nb_rx_ts) {
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
return -EINVAL;
}
- is_one_table = true;
}
tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
list_for_each_entry(chan, &qmc->chan_head, list) {
- if (chan->tx_ts_mask > tx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
- return -EINVAL;
- }
- if (tx_ts_mask & chan->tx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
+ if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
return -EINVAL;
}
- if (chan->rx_ts_mask > rx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
+ if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
return -EINVAL;
}
- if (rx_ts_mask & chan->rx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
- return -EINVAL;
- }
-
- if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
- dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
- return -EINVAL;
- }
-
- tx_ts_mask |= chan->tx_ts_mask;
- rx_ts_mask |= chan->rx_ts_mask;
}
return 0;
@@ -845,6 +1311,7 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
}
chan->id = chan_id;
+ spin_lock_init(&chan->ts_lock);
spin_lock_init(&chan->rx_lock);
spin_lock_init(&chan->tx_lock);
@@ -855,7 +1322,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->tx_ts_mask = ts_mask;
+ chan->tx_ts_mask_avail = ts_mask;
+ chan->tx_ts_mask = chan->tx_ts_mask_avail;
ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
if (ret) {
@@ -864,7 +1332,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->rx_ts_mask = ts_mask;
+ chan->rx_ts_mask_avail = ts_mask;
+ chan->rx_ts_mask = chan->rx_ts_mask_avail;
mode = "transparent";
ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
@@ -895,9 +1364,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
return qmc_check_chans(qmc);
}
-static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -906,23 +1374,12 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
* Everything was previously checked, Tx and Rx related stuffs are
* identical -> Used Rx related stuff to build the table
*/
+ qmc->is_tsa_64rxtx = true;
/* Invalidate all entries */
for (i = 0; i < 64; i++)
qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
- /* Set entries based on Rx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entry */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -937,9 +1394,8 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
return 0;
}
-static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -947,6 +1403,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
* Use a Tx 32 entries table and a Rx 32 entries table.
* Everything was previously checked.
*/
+ qmc->is_tsa_64rxtx = false;
/* Invalidate all entries */
for (i = 0; i < 32; i++) {
@@ -954,28 +1411,6 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
}
- /* Set entries based on Rx and Tx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- /* Rx part */
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- /* Tx part */
- for (i = 0; i < info->nb_tx_ts; i++) {
- if (!(chan->tx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entries */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -995,7 +1430,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
return 0;
}
-static int qmc_setup_tsa(struct qmc *qmc)
+static int qmc_init_tsa(struct qmc *qmc)
{
struct tsa_serial_info info;
int ret;
@@ -1006,46 +1441,12 @@ static int qmc_setup_tsa(struct qmc *qmc)
return ret;
/*
- * Setup one common 64 entries table or two 32 entries (one for Tx and
- * one for Tx) according to assigned TS numbers.
+	 * Initialize one common 64-entry table or two 32-entry tables (one
+	 * for Tx and one for Rx) according to assigned TS numbers.
*/
return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
- qmc_setup_tsa_64rxtx(qmc, &info) :
- qmc_setup_tsa_32rx_32tx(qmc, &info);
-}
-
-static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
-{
- struct tsa_serial_info info;
- u16 first_rx, last_tx;
- u16 trnsync;
- int ret;
-
- /* Retrieve info from the TSA related serial */
- ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
- if (ret)
- return ret;
-
- /* Find the first Rx TS allocated to the channel */
- first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
-
- /* Find the last Tx TS allocated to the channel */
- last_tx = fls64(chan->tx_ts_mask);
-
- trnsync = 0;
- if (info.nb_rx_ts)
- trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
- if (info.nb_tx_ts)
- trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
-
- qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
-
- dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
- chan->id, trnsync,
- first_rx, info.nb_rx_ts, chan->rx_ts_mask,
- last_tx, info.nb_tx_ts, chan->tx_ts_mask);
-
- return 0;
+ qmc_init_tsa_64rxtx(qmc, &info) :
+ qmc_init_tsa_32rx_32tx(qmc, &info);
}
static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
@@ -1077,13 +1478,14 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
- qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
- qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
if (chan->mode == QMC_TRANSPARENT) {
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_transp);
qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
- val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
+ val = QMC_SPE_CHAMR_MODE_TRANSP;
if (chan->is_reverse_data)
val |= QMC_SPE_CHAMR_TRANSP_RD;
qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
@@ -1091,10 +1493,10 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
if (ret)
return ret;
} else {
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_hdlc);
qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
qmc_write16(chan->s_param + QMC_SPE_CHAMR,
- QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
+ QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
}
/* Do not enable interrupts now. They will be enabled later */
@@ -1102,19 +1504,19 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
/* Init Rx BDs and set Wrap bit on last descriptor */
BUILD_BUG_ON(QMC_NB_RXBDS == 0);
- val = QMC_BD_RX_I;
for (i = 0; i < QMC_NB_RXBDS; i++) {
bd = chan->rxbds + i;
- qmc_write16(&bd->cbd_sc, val);
+ qmc_write16(&bd->cbd_sc, 0);
}
bd = chan->rxbds + QMC_NB_RXBDS - 1;
- qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);
+ qmc_write16(&bd->cbd_sc, QMC_BD_RX_W);
/* Init Tx BDs and set Wrap bit on last descriptor */
BUILD_BUG_ON(QMC_NB_TXBDS == 0);
- val = QMC_BD_TX_I;
if (chan->mode == QMC_HDLC)
- val |= QMC_BD_TX_L | QMC_BD_TX_TC;
+ val = QMC_BD_TX_L | QMC_BD_TX_TC;
+ else
+ val = 0;
for (i = 0; i < QMC_NB_TXBDS; i++) {
bd = chan->txbds + i;
qmc_write16(&bd->cbd_sc, val);
@@ -1219,11 +1621,14 @@ static void qmc_irq_gint(struct qmc *qmc)
/* Restart the receiver if needed */
spin_lock_irqsave(&chan->rx_lock, flags);
if (chan->rx_pending && !chan->is_rx_stopped) {
- if (chan->mode == QMC_TRANSPARENT)
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
- else
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK,
+ chan->qmc->data->rpack);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
+ chan->mode == QMC_TRANSPARENT ?
+ chan->qmc->data->zdstate_transp :
+ chan->qmc->data->zdstate_hdlc);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE,
+ chan->qmc->data->rstate);
chan->is_rx_halted = false;
} else {
chan->is_rx_halted = true;
@@ -1267,27 +1672,74 @@ static irqreturn_t qmc_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
-static int qmc_probe(struct platform_device *pdev)
+static int qmc_qe_soft_qmc_init(struct qmc *qmc, struct device_node *np)
{
- struct device_node *np = pdev->dev.of_node;
- unsigned int nb_chans;
- struct resource *res;
- struct qmc *qmc;
- int irq;
+ struct qe_firmware_info *qe_fw_info;
+ const struct qe_firmware *qe_fw;
+ const struct firmware *fw;
+ const char *filename;
int ret;
- qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
- if (!qmc)
- return -ENOMEM;
+ ret = of_property_read_string(np, "fsl,soft-qmc", &filename);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ /* fsl,soft-qmc property not set -> Simply do nothing */
+ return 0;
+ default:
+ dev_err(qmc->dev, "%pOF: failed to read fsl,soft-qmc\n",
+ np);
+ return ret;
+ }
- qmc->dev = &pdev->dev;
- INIT_LIST_HEAD(&qmc->chan_head);
+ qe_fw_info = qe_get_firmware_info();
+ if (qe_fw_info) {
+ if (!strstr(qe_fw_info->id, "Soft-QMC")) {
+ dev_err(qmc->dev, "Another firmware is already loaded\n");
+ return -EALREADY;
+ }
+ dev_info(qmc->dev, "Firmware already loaded\n");
+ return 0;
+ }
+
+ dev_info(qmc->dev, "Using firmware %s\n", filename);
+
+ ret = request_firmware(&fw, filename, qmc->dev);
+ if (ret) {
+ dev_err(qmc->dev, "Failed to request firmware %s\n", filename);
+ return ret;
+ }
+
+ qe_fw = (const struct qe_firmware *)fw->data;
+
+ if (fw->size < sizeof(qe_fw->header) ||
+ be32_to_cpu(qe_fw->header.length) != fw->size) {
+ dev_err(qmc->dev, "Invalid firmware %s\n", filename);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = qe_upload_firmware(qe_fw);
+ if (ret) {
+ dev_err(qmc->dev, "Failed to load firmware %s\n", filename);
+ goto end;
+ }
+
+ ret = 0;
+end:
+ release_firmware(fw);
+ return ret;
+}
+
+static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev)
+{
+ struct resource *res;
qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
if (IS_ERR(qmc->scc_regs))
return PTR_ERR(qmc->scc_regs);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
if (!res)
return -EINVAL;
@@ -1300,44 +1752,214 @@ static int qmc_probe(struct platform_device *pdev)
if (IS_ERR(qmc->dpram))
return PTR_ERR(qmc->dpram);
+ return 0;
+}
+
+static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
+{
+ struct resource *res;
+ int ucc_num;
+ s32 info;
+
+ qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "ucc_regs");
+ if (IS_ERR(qmc->scc_regs))
+ return PTR_ERR(qmc->scc_regs);
+
+ ucc_num = tsa_serial_get_num(qmc->tsa_serial);
+ if (ucc_num < 0)
+ return dev_err_probe(qmc->dev, ucc_num, "Failed to get UCC num\n");
+
+ qmc->qe_subblock = ucc_slow_get_qe_cr_subblock(ucc_num);
+ if (qmc->qe_subblock == QE_CR_SUBBLOCK_INVALID) {
+ dev_err(qmc->dev, "Unsupported ucc num %u\n", ucc_num);
+ return -EINVAL;
+ }
+ /* Allocate the 'Global Multichannel Parameters' and the
+ * 'Framer parameters' areas. The 'Framer parameters' area
+ * is located right after the 'Global Multichannel Parameters'.
+ * The 'Framer parameters' need 1 byte per receive and transmit
+ * channel. The maximum number of receive or transmit channel
+ * is 64. So reserve 2 * 64 bytes for the 'Framer parameters'.
+ */
+ info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64,
+ ALIGNMENT_OF_UCC_SLOW_PRAM);
+ if (info < 0)
+ return info;
+
+ if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, info)) {
+ dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed");
+ return -EIO;
+ }
+ qmc->scc_pram = qe_muram_addr(info);
+ qmc->scc_pram_offset = info;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpram");
+ if (!res)
+ return -EINVAL;
+ qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
+ qmc->dpram = devm_ioremap_resource(qmc->dev, res);
+ if (IS_ERR(qmc->dpram))
+ return PTR_ERR(qmc->dpram);
+
+ return 0;
+}
+
+static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev)
+{
+ return qmc_is_qe(qmc) ?
+ qmc_qe_init_resources(qmc, pdev) :
+ qmc_cpm1_init_resources(qmc, pdev);
+}
+
+static int qmc_cpm1_init_scc(struct qmc *qmc)
+{
+ u32 val;
+ int ret;
+
+ /* Connect the serial (SCC) to TSA */
+ ret = tsa_serial_connect(qmc->tsa_serial);
+ if (ret)
+ return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
+
+ /* Init GSMR_H and GSMR_L registers */
+ val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP;
+ qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
+
+ /* enable QMC mode */
+ qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_CPM1_GSMRL_MODE_QMC);
+
+ /* Disable and clear interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+
+ return 0;
+}
+
+static int qmc_qe_init_ucc(struct qmc *qmc)
+{
+ u32 val;
+ int ret;
+
+ /* Set the UCC in slow mode */
+ qmc_write8(qmc->scc_regs + SCC_QE_UCC_GUEMR,
+ UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
+
+ /* Connect the serial (UCC) to TSA */
+ ret = tsa_serial_connect(qmc->tsa_serial);
+ if (ret)
+ return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
+
+ /* Initialize the QMC tx startup addresses */
+ if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0x80)) {
+ dev_err(qmc->dev, "QE_CMD_PUSH_SCHED tx cmd failed");
+ ret = -EIO;
+ goto err_tsa_serial_disconnect;
+ }
+
+ /* Initialize the QMC rx startup addresses */
+ if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock | 0x00020000,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0x82)) {
+ dev_err(qmc->dev, "QE_CMD_PUSH_SCHED rx cmd failed");
+ ret = -EIO;
+ goto err_tsa_serial_disconnect;
+ }
+
+ /* Re-init RXPTR and TXPTR with the content of RX_S_PTR and
+ * TX_S_PTR (RX_S_PTR and TX_S_PTR are initialized during
+ * qmc_init_tsa() call)
+ */
+ val = qmc_read16(qmc->scc_pram + QMC_GBL_RX_S_PTR);
+ qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
+ val = qmc_read16(qmc->scc_pram + QMC_GBL_TX_S_PTR);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
+
+ /* Init GUMR_H and GUMR_L registers (SCC GSMR_H and GSMR_L) */
+ val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP |
+ SCC_GSMRH_TRX | SCC_GSMRH_TTX;
+ qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
+
+ /* enable QMC mode */
+ qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_QE_GSMRL_MODE_QMC);
+
+ /* Disable and clear interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+
+ return 0;
+
+err_tsa_serial_disconnect:
+ tsa_serial_disconnect(qmc->tsa_serial);
+ return ret;
+}
+
+static int qmc_init_xcc(struct qmc *qmc)
+{
+ return qmc_is_qe(qmc) ?
+ qmc_qe_init_ucc(qmc) :
+ qmc_cpm1_init_scc(qmc);
+}
+
+static void qmc_exit_xcc(struct qmc *qmc)
+{
+ /* Disconnect the serial from TSA */
+ tsa_serial_disconnect(qmc->tsa_serial);
+}
+
+static int qmc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int nb_chans;
+ struct qmc *qmc;
+ int irq;
+ int ret;
+
+ qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
+ if (!qmc)
+ return -ENOMEM;
+
+ qmc->dev = &pdev->dev;
+ qmc->data = of_device_get_match_data(&pdev->dev);
+ if (!qmc->data) {
+ dev_err(qmc->dev, "Missing match data\n");
+ return -EINVAL;
+ }
+ INIT_LIST_HEAD(&qmc->chan_head);
+
qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
if (IS_ERR(qmc->tsa_serial)) {
return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
"Failed to get TSA serial\n");
}
- /* Connect the serial (SCC) to TSA */
- ret = tsa_serial_connect(qmc->tsa_serial);
- if (ret) {
- dev_err(qmc->dev, "Failed to connect TSA serial\n");
+ ret = qmc_init_resources(qmc, pdev);
+ if (ret)
return ret;
+
+ if (qmc_is_qe(qmc)) {
+ ret = qmc_qe_soft_qmc_init(qmc, np);
+ if (ret)
+ return ret;
}
/* Parse channels information */
ret = qmc_of_parse_chans(qmc, np);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
nb_chans = qmc_nb_chans(qmc);
- /* Init GMSR_H and GMSR_L registers */
- qmc_write32(qmc->scc_regs + SCC_GSMRH,
- SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);
-
- /* enable QMC mode */
- qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);
-
/*
* Allocate the buffer descriptor table
* 8 rx and 8 tx descriptors per channel
*/
qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
- &qmc->bd_dma_addr, GFP_KERNEL);
+ &qmc->bd_dma_addr, GFP_KERNEL);
if (!qmc->bd_table) {
dev_err(qmc->dev, "Failed to allocate bd table\n");
- ret = -ENOMEM;
- goto err_tsa_serial_disconnect;
+ return -ENOMEM;
}
memset(qmc->bd_table, 0, qmc->bd_size);
@@ -1346,11 +1968,10 @@ static int qmc_probe(struct platform_device *pdev)
/* Allocate the interrupt table */
qmc->int_size = QMC_NB_INTS * sizeof(u16);
qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
- &qmc->int_dma_addr, GFP_KERNEL);
+ &qmc->int_dma_addr, GFP_KERNEL);
if (!qmc->int_table) {
dev_err(qmc->dev, "Failed to allocate interrupt table\n");
- ret = -ENOMEM;
- goto err_tsa_serial_disconnect;
+ return -ENOMEM;
}
memset(qmc->int_table, 0, qmc->int_size);
@@ -1367,72 +1988,124 @@ static int qmc_probe(struct platform_device *pdev)
qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
- ret = qmc_setup_tsa(qmc);
+ if (qmc_is_qe(qmc)) {
+ /* Zero the reserved area */
+ memset_io(qmc->scc_pram + QMC_QE_GBL_RSV_B0_START, 0,
+ QMC_QE_GBL_RSV_B0_SIZE);
+
+ qmc_write32(qmc->scc_pram + QMC_QE_GBL_GCSBASE, qmc->dpram_offset);
+
+ /* Init 'framer parameters' area and set the base addresses */
+ memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE, 0x01, 64);
+ memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE + 64, 0x01, 64);
+ qmc_write16(qmc->scc_pram + QMC_QE_GBL_RX_FRM_BASE,
+ qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE);
+ qmc_write16(qmc->scc_pram + QMC_QE_GBL_TX_FRM_BASE,
+ qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE + 64);
+ }
+
+ ret = qmc_init_tsa(qmc);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
ret = qmc_setup_chans(qmc);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
/* Init interrupts table */
ret = qmc_setup_ints(qmc);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
- /* Disable and clear interrupts, set the irq handler */
- qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
- qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+ /* Init SCC (CPM1) or UCC (QE) */
+ ret = qmc_init_xcc(qmc);
+ if (ret)
+ return ret;
+
+ /* Set the irq handler */
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- goto err_tsa_serial_disconnect;
+ if (irq < 0) {
+ ret = irq;
+ goto err_exit_xcc;
+ }
ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
if (ret < 0)
- goto err_tsa_serial_disconnect;
+ goto err_exit_xcc;
/* Enable interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM,
- SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
+ SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
ret = qmc_finalize_chans(qmc);
if (ret < 0)
goto err_disable_intr;
- /* Enable transmiter and receiver */
+ /* Enable transmitter and receiver */
qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
platform_set_drvdata(pdev, qmc);
+ /* Populate channel related devices */
+ ret = devm_of_platform_populate(qmc->dev);
+ if (ret)
+ goto err_disable_txrx;
+
return 0;
+err_disable_txrx:
+ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
+
err_disable_intr:
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
-err_tsa_serial_disconnect:
- tsa_serial_disconnect(qmc->tsa_serial);
+err_exit_xcc:
+ qmc_exit_xcc(qmc);
return ret;
}
-static int qmc_remove(struct platform_device *pdev)
+static void qmc_remove(struct platform_device *pdev)
{
struct qmc *qmc = platform_get_drvdata(pdev);
- /* Disable transmiter and receiver */
+ /* Disable transmitter and receiver */
qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
/* Disable interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
- /* Disconnect the serial from TSA */
- tsa_serial_disconnect(qmc->tsa_serial);
-
- return 0;
+ /* Exit SCC (CPM1) or UCC (QE) */
+ qmc_exit_xcc(qmc);
}
+static const struct qmc_data qmc_data_cpm1 __maybe_unused = {
+ .version = QMC_CPM1,
+ .tstate = 0x30000000,
+ .rstate = 0x31000000,
+ .zistate = 0x00000100,
+ .zdstate_hdlc = 0x00000080,
+ .zdstate_transp = 0x18000080,
+ .rpack = 0x00000000,
+};
+
+static const struct qmc_data qmc_data_qe __maybe_unused = {
+ .version = QMC_QE,
+ .tstate = 0x30000000,
+ .rstate = 0x30000000,
+ .zistate = 0x00000200,
+ .zdstate_hdlc = 0x80FFFFE0,
+ .zdstate_transp = 0x003FFFE2,
+ .rpack = 0x80000000,
+};
+
static const struct of_device_id qmc_id_table[] = {
- { .compatible = "fsl,cpm1-scc-qmc" },
+#if IS_ENABLED(CONFIG_CPM1)
+ { .compatible = "fsl,cpm1-scc-qmc", .data = &qmc_data_cpm1 },
+#endif
+#if IS_ENABLED(CONFIG_QUICC_ENGINE)
+ { .compatible = "fsl,qe-ucc-qmc", .data = &qmc_data_qe },
+#endif
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);
@@ -1447,26 +2120,16 @@ static struct platform_driver qmc_driver = {
};
module_platform_driver(qmc_driver);
-struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
- struct of_phandle_args out_args;
struct platform_device *pdev;
struct qmc_chan *qmc_chan;
struct qmc *qmc;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
- &out_args);
- if (ret < 0)
- return ERR_PTR(ret);
- if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
- of_node_put(out_args.np);
+ if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
return ERR_PTR(-EINVAL);
- }
- pdev = of_find_device_by_node(out_args.np);
- of_node_put(out_args.np);
+ pdev = of_find_device_by_node(qmc_np);
if (!pdev)
return ERR_PTR(-ENODEV);
@@ -1476,17 +2139,12 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return ERR_PTR(-EPROBE_DEFER);
}
- if (out_args.args_count != 1) {
- platform_device_put(pdev);
- return ERR_PTR(-EINVAL);
- }
-
- if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
+ if (chan_index >= ARRAY_SIZE(qmc->chans)) {
platform_device_put(pdev);
return ERR_PTR(-EINVAL);
}
- qmc_chan = qmc->chans[out_args.args[0]];
+ qmc_chan = qmc->chans[chan_index];
if (!qmc_chan) {
platform_device_put(pdev);
return ERR_PTR(-ENOENT);
@@ -1494,7 +2152,58 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return qmc_chan;
}
-EXPORT_SYMBOL(qmc_chan_get_byphandle);
+
+int qmc_chan_count_phandles(struct device_node *np, const char *phandles_name)
+{
+ int count;
+
+ /* phandles are fixed args phandles with one arg */
+ count = of_count_phandle_with_args(np, phandles_name, NULL);
+ if (count < 0)
+ return count;
+
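+ /* Each list entry uses two cells (the phandle and its one arg), hence count / 2 */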
+ return count / 2;
+}
+EXPORT_SYMBOL(qmc_chan_count_phandles);
+
+struct qmc_chan *qmc_chan_get_byphandles_index(struct device_node *np,
+ const char *phandles_name,
+ int index)
+{
+ struct of_phandle_args out_args;
+ struct qmc_chan *qmc_chan;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandles_name, 1, index,
+ &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (out_args.args_count != 1) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
+ of_node_put(out_args.np);
+ return qmc_chan;
+}
+EXPORT_SYMBOL(qmc_chan_get_byphandles_index);
+
+struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
+{
+ struct device_node *qmc_np;
+ u32 chan_index;
+ int ret;
+
+ qmc_np = np->parent;
+ ret = of_property_read_u32(np, "reg", &chan_index);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return qmc_chan_get_from_qmc(qmc_np, chan_index);
+}
+EXPORT_SYMBOL(qmc_chan_get_bychild);
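For reference, a minimal consumer-side usage sketch of the lookup helpers added above (editorial illustration only, not part of the patch; the probe function and the "fsl,qmc-chan" property name are assumptions):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/fsl/qe/qmc.h>

static int example_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct qmc_chan *chan;
	int i, count;

	/* Count the channel phandles, then fetch each channel by index */
	count = qmc_chan_count_phandles(np, "fsl,qmc-chan");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		chan = qmc_chan_get_byphandles_index(np, "fsl,qmc-chan", i);
		if (IS_ERR(chan))
			return PTR_ERR(chan);
		/* ... configure and use the channel ... */
		qmc_chan_put(chan);
	}
	return 0;
}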
void qmc_chan_put(struct qmc_chan *chan)
{
@@ -1509,9 +2218,32 @@ static void devm_qmc_chan_release(struct device *dev, void *res)
qmc_chan_put(*qmc_chan);
}
-struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
- struct device_node *np,
- const char *phandle_name)
+struct qmc_chan *devm_qmc_chan_get_byphandles_index(struct device *dev,
+ struct device_node *np,
+ const char *phandles_name,
+ int index)
+{
+ struct qmc_chan *qmc_chan;
+ struct qmc_chan **dr;
+
+ dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ qmc_chan = qmc_chan_get_byphandles_index(np, phandles_name, index);
+ if (!IS_ERR(qmc_chan)) {
+ *dr = qmc_chan;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(devm_qmc_chan_get_byphandles_index);
+
+struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
+ struct device_node *np)
{
struct qmc_chan *qmc_chan;
struct qmc_chan **dr;
@@ -1520,7 +2252,7 @@ struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
if (!dr)
return ERR_PTR(-ENOMEM);
- qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
+ qmc_chan = qmc_chan_get_bychild(np);
if (!IS_ERR(qmc_chan)) {
*dr = qmc_chan;
devres_add(dev, dr);
@@ -1530,8 +2262,8 @@ struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
return qmc_chan;
}
-EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
+EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
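A similar sketch for the devm child-node variant, assuming a consumer bound to a QMC child node populated via devm_of_platform_populate() (the probe name is illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <soc/fsl/qe/qmc.h>

static int example_child_probe(struct platform_device *pdev)
{
	struct qmc_chan *chan;

	/* The channel index is taken from the child node's "reg" property */
	chan = devm_qmc_chan_get_bychild(&pdev->dev, pdev->dev.of_node);
	if (IS_ERR(chan))
		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
				     "Failed to get QMC channel\n");

	/* The devres reference is released automatically on driver detach */
	return 0;
}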
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
-MODULE_DESCRIPTION("CPM QMC driver");
+MODULE_DESCRIPTION("CPM/QE QMC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c
index 3646153117b3..4a88e54d25b9 100644
--- a/drivers/soc/fsl/qe/tsa.c
+++ b/drivers/soc/fsl/qe/tsa.c
@@ -9,6 +9,8 @@
#include "tsa.h"
#include <dt-bindings/soc/cpm1-fsl,tsa.h>
+#include <dt-bindings/soc/qe-fsl,tsa.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -16,91 +18,121 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <soc/fsl/qe/ucc.h>
+
+/* TSA SI RAM routing tables entry (CPM1) */
+#define TSA_CPM1_SIRAM_ENTRY_LAST BIT(16)
+#define TSA_CPM1_SIRAM_ENTRY_BYTE BIT(17)
+#define TSA_CPM1_SIRAM_ENTRY_CNT_MASK GENMASK(21, 18)
+#define TSA_CPM1_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK GENMASK(24, 22)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6)
+
+/* TSA SI RAM routing tables entry (QE) */
+#define TSA_QE_SIRAM_ENTRY_LAST BIT(0)
+#define TSA_QE_SIRAM_ENTRY_BYTE BIT(1)
+#define TSA_QE_SIRAM_ENTRY_CNT_MASK GENMASK(4, 2)
+#define TSA_QE_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_QE_SIRAM_ENTRY_CNT_MASK, x)
+#define TSA_QE_SIRAM_ENTRY_CSEL_MASK GENMASK(8, 5)
+#define TSA_QE_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x0)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC5 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x1)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC1 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x9)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC2 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xa)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC3 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xb)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC4 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xc)
-
-/* TSA SI RAM routing tables entry */
-#define TSA_SIRAM_ENTRY_LAST (1 << 16)
-#define TSA_SIRAM_ENTRY_BYTE (1 << 17)
-#define TSA_SIRAM_ENTRY_CNT(x) (((x) & 0x0f) << 18)
-#define TSA_SIRAM_ENTRY_CSEL_MASK (0x7 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_NU (0x0 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SCC2 (0x2 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SCC3 (0x3 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SCC4 (0x4 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SMC1 (0x5 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SMC2 (0x6 << 22)
-
-/* SI mode register (32 bits) */
-#define TSA_SIMODE 0x00
-#define TSA_SIMODE_SMC2 0x80000000
-#define TSA_SIMODE_SMC1 0x00008000
-#define TSA_SIMODE_TDMA(x) ((x) << 0)
-#define TSA_SIMODE_TDMB(x) ((x) << 16)
-#define TSA_SIMODE_TDM_MASK 0x0fff
-#define TSA_SIMODE_TDM_SDM_MASK 0x0c00
-#define TSA_SIMODE_TDM_SDM_NORM 0x0000
-#define TSA_SIMODE_TDM_SDM_ECHO 0x0400
-#define TSA_SIMODE_TDM_SDM_INTL_LOOP 0x0800
-#define TSA_SIMODE_TDM_SDM_LOOP_CTRL 0x0c00
-#define TSA_SIMODE_TDM_RFSD(x) ((x) << 8)
-#define TSA_SIMODE_TDM_DSC 0x0080
-#define TSA_SIMODE_TDM_CRT 0x0040
-#define TSA_SIMODE_TDM_STZ 0x0020
-#define TSA_SIMODE_TDM_CE 0x0010
-#define TSA_SIMODE_TDM_FE 0x0008
-#define TSA_SIMODE_TDM_GM 0x0004
-#define TSA_SIMODE_TDM_TFSD(x) ((x) << 0)
-
-/* SI global mode register (8 bits) */
-#define TSA_SIGMR 0x04
-#define TSA_SIGMR_ENB (1<<3)
-#define TSA_SIGMR_ENA (1<<2)
-#define TSA_SIGMR_RDM_MASK 0x03
-#define TSA_SIGMR_RDM_STATIC_TDMA 0x00
-#define TSA_SIGMR_RDM_DYN_TDMA 0x01
-#define TSA_SIGMR_RDM_STATIC_TDMAB 0x02
-#define TSA_SIGMR_RDM_DYN_TDMAB 0x03
-
-/* SI status register (8 bits) */
-#define TSA_SISTR 0x06
-
-/* SI command register (8 bits) */
-#define TSA_SICMR 0x07
+/*
+ * SI mode register:
+ * - CPM1: one 32-bit register split into two 16-bit fields (one per TDM)
+ * - QE: four 16-bit registers, one per TDM
+ */
+#define TSA_CPM1_SIMODE 0x00
+#define TSA_QE_SIAMR 0x00
+#define TSA_QE_SIBMR 0x02
+#define TSA_QE_SICMR 0x04
+#define TSA_QE_SIDMR 0x06
+#define TSA_CPM1_SIMODE_SMC2 BIT(31)
+#define TSA_CPM1_SIMODE_SMC1 BIT(15)
+#define TSA_CPM1_SIMODE_TDMA_MASK GENMASK(11, 0)
+#define TSA_CPM1_SIMODE_TDMA(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMA_MASK, x)
+#define TSA_CPM1_SIMODE_TDMB_MASK GENMASK(27, 16)
+#define TSA_CPM1_SIMODE_TDMB(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMB_MASK, x)
+#define TSA_QE_SIMODE_TDM_SAD_MASK GENMASK(15, 12)
+#define TSA_QE_SIMODE_TDM_SAD(x) FIELD_PREP(TSA_QE_SIMODE_TDM_SAD_MASK, x)
+#define TSA_CPM1_SIMODE_TDM_MASK GENMASK(11, 0)
+#define TSA_SIMODE_TDM_SDM_MASK GENMASK(11, 10)
+#define TSA_SIMODE_TDM_SDM_NORM FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
+#define TSA_SIMODE_TDM_SDM_ECHO FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
+#define TSA_SIMODE_TDM_SDM_INTL_LOOP FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
+#define TSA_SIMODE_TDM_SDM_LOOP_CTRL FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
+#define TSA_SIMODE_TDM_RFSD_MASK GENMASK(9, 8)
+#define TSA_SIMODE_TDM_RFSD(x) FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
+#define TSA_SIMODE_TDM_DSC BIT(7)
+#define TSA_SIMODE_TDM_CRT BIT(6)
+#define TSA_CPM1_SIMODE_TDM_STZ BIT(5) /* bit 5: STZ in CPM1 */
+#define TSA_QE_SIMODE_TDM_SL BIT(5) /* bit 5: SL in QE */
+#define TSA_SIMODE_TDM_CE BIT(4)
+#define TSA_SIMODE_TDM_FE BIT(3)
+#define TSA_SIMODE_TDM_GM BIT(2)
+#define TSA_SIMODE_TDM_TFSD_MASK GENMASK(1, 0)
+#define TSA_SIMODE_TDM_TFSD(x) FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)
+
+/* CPM SI global mode register (8 bits) */
+#define TSA_CPM1_SIGMR 0x04
+#define TSA_CPM1_SIGMR_ENB BIT(3)
+#define TSA_CPM1_SIGMR_ENA BIT(2)
+#define TSA_CPM1_SIGMR_RDM_MASK GENMASK(1, 0)
+#define TSA_CPM1_SIGMR_RDM_STATIC_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x0)
+#define TSA_CPM1_SIGMR_RDM_DYN_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x1)
+#define TSA_CPM1_SIGMR_RDM_STATIC_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x2)
+#define TSA_CPM1_SIGMR_RDM_DYN_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x3)
+
+/* QE SI global mode register high (8 bits) */
+#define TSA_QE_SIGLMRH 0x08
+#define TSA_QE_SIGLMRH_END BIT(3)
+#define TSA_QE_SIGLMRH_ENC BIT(2)
+#define TSA_QE_SIGLMRH_ENB BIT(1)
+#define TSA_QE_SIGLMRH_ENA BIT(0)
/* SI clock route register (32 bits) */
-#define TSA_SICR 0x0C
-#define TSA_SICR_SCC2(x) ((x) << 8)
-#define TSA_SICR_SCC3(x) ((x) << 16)
-#define TSA_SICR_SCC4(x) ((x) << 24)
-#define TSA_SICR_SCC_MASK 0x0ff
-#define TSA_SICR_SCC_GRX (1 << 7)
-#define TSA_SICR_SCC_SCX_TSA (1 << 6)
-#define TSA_SICR_SCC_RXCS_MASK (0x7 << 3)
-#define TSA_SICR_SCC_RXCS_BRG1 (0x0 << 3)
-#define TSA_SICR_SCC_RXCS_BRG2 (0x1 << 3)
-#define TSA_SICR_SCC_RXCS_BRG3 (0x2 << 3)
-#define TSA_SICR_SCC_RXCS_BRG4 (0x3 << 3)
-#define TSA_SICR_SCC_RXCS_CLK15 (0x4 << 3)
-#define TSA_SICR_SCC_RXCS_CLK26 (0x5 << 3)
-#define TSA_SICR_SCC_RXCS_CLK37 (0x6 << 3)
-#define TSA_SICR_SCC_RXCS_CLK48 (0x7 << 3)
-#define TSA_SICR_SCC_TXCS_MASK (0x7 << 0)
-#define TSA_SICR_SCC_TXCS_BRG1 (0x0 << 0)
-#define TSA_SICR_SCC_TXCS_BRG2 (0x1 << 0)
-#define TSA_SICR_SCC_TXCS_BRG3 (0x2 << 0)
-#define TSA_SICR_SCC_TXCS_BRG4 (0x3 << 0)
-#define TSA_SICR_SCC_TXCS_CLK15 (0x4 << 0)
-#define TSA_SICR_SCC_TXCS_CLK26 (0x5 << 0)
-#define TSA_SICR_SCC_TXCS_CLK37 (0x6 << 0)
-#define TSA_SICR_SCC_TXCS_CLK48 (0x7 << 0)
-
-/* Serial interface RAM pointer register (32 bits) */
-#define TSA_SIRP 0x10
+#define TSA_CPM1_SICR 0x0C
+#define TSA_CPM1_SICR_SCC2_MASK GENMASK(15, 8)
+#define TSA_CPM1_SICR_SCC2(x) FIELD_PREP(TSA_CPM1_SICR_SCC2_MASK, x)
+#define TSA_CPM1_SICR_SCC3_MASK GENMASK(23, 16)
+#define TSA_CPM1_SICR_SCC3(x) FIELD_PREP(TSA_CPM1_SICR_SCC3_MASK, x)
+#define TSA_CPM1_SICR_SCC4_MASK GENMASK(31, 24)
+#define TSA_CPM1_SICR_SCC4(x) FIELD_PREP(TSA_CPM1_SICR_SCC4_MASK, x)
+#define TSA_CPM1_SICR_SCC_MASK GENMASK(7, 0)
+#define TSA_CPM1_SICR_SCC_GRX BIT(7)
+#define TSA_CPM1_SICR_SCC_SCX_TSA BIT(6)
+#define TSA_CPM1_SICR_SCC_RXCS_MASK GENMASK(5, 3)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x0)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x1)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x2)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x3)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x4)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x5)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x6)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x7)
+#define TSA_CPM1_SICR_SCC_TXCS_MASK GENMASK(2, 0)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x0)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x1)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x2)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x3)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x4)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x5)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x6)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x7)
struct tsa_entries_area {
- void *__iomem entries_start;
- void *__iomem entries_next;
- void *__iomem last_entry;
+ void __iomem *entries_start;
+ void __iomem *entries_next;
+ void __iomem *last_entry;
};
struct tsa_tdm {
@@ -114,15 +146,31 @@ struct tsa_tdm {
#define TSA_TDMA 0
#define TSA_TDMB 1
+#define TSA_TDMC 2 /* QE implementation only */
+#define TSA_TDMD 3 /* QE implementation only */
+
+enum tsa_version {
+ TSA_CPM1 = 1, /* Avoid 0 value */
+ TSA_QE,
+};
struct tsa {
struct device *dev;
- void *__iomem si_regs;
- void *__iomem si_ram;
+ void __iomem *si_regs;
+ void __iomem *si_ram;
resource_size_t si_ram_sz;
- spinlock_t lock;
+ spinlock_t lock; /* Lock for read/modify/write sequence */
+ enum tsa_version version;
int tdms; /* TSA_TDMx ORed */
+#if IS_ENABLED(CONFIG_QUICC_ENGINE)
+ struct tsa_tdm tdm[4]; /* TDMa, TDMb, TDMc and TDMd */
+#else
struct tsa_tdm tdm[2]; /* TDMa and TDMb */
+#endif
+ /* Same number of serials for CPM1 and QE:
+ * CPM1: NU, 3 SCCs and 2 SMCs
+ * QE: NU and 5 UCCs
+ */
struct tsa_serial {
unsigned int id;
struct tsa_serial_info info;
@@ -135,32 +183,88 @@ static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}
-static inline void tsa_write32(void *__iomem addr, u32 val)
+static inline void tsa_write32(void __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}
-static inline void tsa_write8(void *__iomem addr, u32 val)
+static inline void tsa_write16(void __iomem *addr, u16 val)
+{
+ iowrite16be(val, addr);
+}
+
+static inline void tsa_write8(void __iomem *addr, u8 val)
{
iowrite8(val, addr);
}
-static inline u32 tsa_read32(void *__iomem addr)
+static inline u32 tsa_read32(void __iomem *addr)
{
return ioread32be(addr);
}
-static inline void tsa_clrbits32(void *__iomem addr, u32 clr)
+static inline u16 tsa_read16(void __iomem *addr)
+{
+ return ioread16be(addr);
+}
+
+static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
tsa_write32(addr, tsa_read32(addr) & ~clr);
}
-static inline void tsa_clrsetbits32(void *__iomem addr, u32 clr, u32 set)
+static inline void tsa_clrbits16(void __iomem *addr, u16 clr)
+{
+ tsa_write16(addr, tsa_read16(addr) & ~clr);
+}
+
+static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}
-int tsa_serial_connect(struct tsa_serial *tsa_serial)
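+/*
+ * When only one of CONFIG_CPM or CONFIG_QUICC_ENGINE is enabled, this check
+ * resolves to a compile-time constant and the unused code paths can be
+ * optimized out.
+ */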
+static bool tsa_is_qe(const struct tsa *tsa)
+{
+ if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
+ return tsa->version == TSA_QE;
+
+ return IS_ENABLED(CONFIG_QUICC_ENGINE);
+}
+
+static int tsa_qe_serial_get_num(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ switch (tsa_serial->id) {
+ case FSL_QE_TSA_UCC1: return 0;
+ case FSL_QE_TSA_UCC2: return 1;
+ case FSL_QE_TSA_UCC3: return 2;
+ case FSL_QE_TSA_UCC4: return 3;
+ case FSL_QE_TSA_UCC5: return 4;
+ default:
+ break;
+ }
+
+ dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
+ return -EINVAL;
+}
+
+int tsa_serial_get_num(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ /*
+ * There is no need to get the serial num out of the TSA driver in the
+ * CPM case.
+ * Furthermore, in CPM, we can have 2 types of serials: SCCs and FCCs.
+ * What kind of numbering could be used that is global to both SCCs and
+ * FCCs?
+ */
+ return tsa_is_qe(tsa) ? tsa_qe_serial_get_num(tsa_serial) : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(tsa_serial_get_num);
+
+static int tsa_cpm1_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
unsigned long flags;
@@ -169,16 +273,16 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial)
switch (tsa_serial->id) {
case FSL_CPM_TSA_SCC2:
- clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
- set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
+ clear = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_MASK);
+ set = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
case FSL_CPM_TSA_SCC3:
- clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
- set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
+ clear = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_MASK);
+ set = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
case FSL_CPM_TSA_SCC4:
- clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
- set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
+ clear = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_MASK);
+ set = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
default:
dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
@@ -186,40 +290,53 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial)
}
spin_lock_irqsave(&tsa->lock, flags);
- tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
+ tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SICR, clear,
+ connect ? set : 0);
spin_unlock_irqrestore(&tsa->lock, flags);
return 0;
}
-EXPORT_SYMBOL(tsa_serial_connect);
-int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
+static int tsa_qe_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
unsigned long flags;
- u32 clear;
+ int ucc_num;
+ int ret;
- switch (tsa_serial->id) {
- case FSL_CPM_TSA_SCC2:
- clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
- break;
- case FSL_CPM_TSA_SCC3:
- clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
- break;
- case FSL_CPM_TSA_SCC4:
- clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
- break;
- default:
- dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
- return -EINVAL;
- }
+ ucc_num = tsa_qe_serial_get_num(tsa_serial);
+ if (ucc_num < 0)
+ return ucc_num;
spin_lock_irqsave(&tsa->lock, flags);
- tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
+ ret = ucc_set_qe_mux_tsa(ucc_num, connect);
spin_unlock_irqrestore(&tsa->lock, flags);
-
+ if (ret) {
+ dev_err(tsa->dev, "Connect serial id %u to TSA failed (%d)\n",
+ tsa_serial->id, ret);
+ return ret;
+ }
return 0;
}
+
+int tsa_serial_connect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ return tsa_is_qe(tsa) ?
+ tsa_qe_serial_connect(tsa_serial, true) :
+ tsa_cpm1_serial_connect(tsa_serial, true);
+}
+EXPORT_SYMBOL(tsa_serial_connect);
+
+int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ return tsa_is_qe(tsa) ?
+ tsa_qe_serial_connect(tsa_serial, false) :
+ tsa_cpm1_serial_connect(tsa_serial, false);
+}
EXPORT_SYMBOL(tsa_serial_disconnect);
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
@@ -229,14 +346,14 @@ int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *i
}
EXPORT_SYMBOL(tsa_serial_get_info);
-static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
- u32 tdms, u32 tdm_id, bool is_rx)
+static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
{
resource_size_t quarter;
resource_size_t half;
- quarter = tsa->si_ram_sz/4;
- half = tsa->si_ram_sz/2;
+ quarter = tsa->si_ram_sz / 4;
+ half = tsa->si_ram_sz / 2;
if (tdms == BIT(TSA_TDMA)) {
/* Only TDMA */
@@ -281,7 +398,42 @@ static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area
}
}
-static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
+static void tsa_qe_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ resource_size_t eighth;
+ resource_size_t half;
+
+ eighth = tsa->si_ram_sz / 8;
+ half = tsa->si_ram_sz / 2;
+
+ /*
+ * One half of the SI RAM used for Tx, the other one for Rx.
+ * In each half, 1/4 of the area is assigned to each TDM.
+ */
+ if (is_rx) {
+ /* Rx: Second half of si_ram */
+ area->entries_start = tsa->si_ram + half + (eighth * tdm_id);
+ area->entries_next = area->entries_start + eighth;
+ area->last_entry = NULL;
+ } else {
+ /* Tx: First half of si_ram */
+ area->entries_start = tsa->si_ram + (eighth * tdm_id);
+ area->entries_next = area->entries_start + eighth;
+ area->last_entry = NULL;
+ }
+}
+
+static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ if (tsa_is_qe(tsa))
+ tsa_qe_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
+ else
+ tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
+}
+
+static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
case FSL_CPM_TSA_NU: return "Not used";
@@ -296,24 +448,46 @@ static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
return NULL;
}
-static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
+static const char *tsa_qe_serial_id2name(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
- case FSL_CPM_TSA_SCC2: return TSA_SIRAM_ENTRY_CSEL_SCC2;
- case FSL_CPM_TSA_SCC3: return TSA_SIRAM_ENTRY_CSEL_SCC3;
- case FSL_CPM_TSA_SCC4: return TSA_SIRAM_ENTRY_CSEL_SCC4;
- case FSL_CPM_TSA_SMC1: return TSA_SIRAM_ENTRY_CSEL_SMC1;
- case FSL_CPM_TSA_SMC2: return TSA_SIRAM_ENTRY_CSEL_SMC2;
+ case FSL_QE_TSA_NU: return "Not used";
+ case FSL_QE_TSA_UCC1: return "UCC1";
+ case FSL_QE_TSA_UCC2: return "UCC2";
+ case FSL_QE_TSA_UCC3: return "UCC3";
+ case FSL_QE_TSA_UCC4: return "UCC4";
+ case FSL_QE_TSA_UCC5: return "UCC5";
default:
break;
}
- return TSA_SIRAM_ENTRY_CSEL_NU;
+ return NULL;
}
-static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
- u32 count, u32 serial_id)
+static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
{
- void *__iomem addr;
+ return tsa_is_qe(tsa) ?
+ tsa_qe_serial_id2name(tsa, serial_id) :
+ tsa_cpm1_serial_id2name(tsa, serial_id);
+}
+
+static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_CPM_TSA_SCC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2;
+ case FSL_CPM_TSA_SCC3: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3;
+ case FSL_CPM_TSA_SCC4: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4;
+ case FSL_CPM_TSA_SMC1: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1;
+ case FSL_CPM_TSA_SMC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2;
+ default:
+ break;
+ }
+ return TSA_CPM1_SIRAM_ENTRY_CSEL_NU;
+}
+
+static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+{
+ void __iomem *addr;
u32 left;
u32 val;
u32 cnt;
@@ -329,21 +503,21 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
if (area->last_entry) {
/* Clear last flag */
- tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
+ tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST);
}
left = count;
while (left) {
- val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);
+ val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id);
if (left > 16) {
cnt = 16;
} else {
cnt = left;
- val |= TSA_SIRAM_ENTRY_LAST;
+ val |= TSA_CPM1_SIRAM_ENTRY_LAST;
area->last_entry = addr;
}
- val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);
+ val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1);
tsa_write32(addr, val);
addr += 4;
@@ -353,6 +527,71 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
return 0;
}
+static u32 tsa_qe_serial_id2csel(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_QE_TSA_UCC1: return TSA_QE_SIRAM_ENTRY_CSEL_UCC1;
+ case FSL_QE_TSA_UCC2: return TSA_QE_SIRAM_ENTRY_CSEL_UCC2;
+ case FSL_QE_TSA_UCC3: return TSA_QE_SIRAM_ENTRY_CSEL_UCC3;
+ case FSL_QE_TSA_UCC4: return TSA_QE_SIRAM_ENTRY_CSEL_UCC4;
+ case FSL_QE_TSA_UCC5: return TSA_QE_SIRAM_ENTRY_CSEL_UCC5;
+ default:
+ break;
+ }
+ return TSA_QE_SIRAM_ENTRY_CSEL_NU;
+}
+
+static int tsa_qe_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+{
+ void __iomem *addr;
+ u32 left;
+ u32 val;
+ u32 cnt;
+ u32 nb;
+
+ addr = area->last_entry ? area->last_entry + 2 : area->entries_start;
+
+ nb = DIV_ROUND_UP(count, 8);
+ if ((addr + (nb * 2)) > area->entries_next) {
+ dev_err(tsa->dev, "si ram area full\n");
+ return -ENOSPC;
+ }
+
+ if (area->last_entry) {
+ /* Clear last flag */
+ tsa_clrbits16(area->last_entry, TSA_QE_SIRAM_ENTRY_LAST);
+ }
+
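+ /*
+ * Example: count = 20 is split into three entries covering 8, 8 and 4
+ * time slots; each entry's CNT field holds its slot count minus one and
+ * LAST is set on the final entry only.
+ */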
+ left = count;
+ while (left) {
+ val = TSA_QE_SIRAM_ENTRY_BYTE | tsa_qe_serial_id2csel(tsa, serial_id);
+
+ if (left > 8) {
+ cnt = 8;
+ } else {
+ cnt = left;
+ val |= TSA_QE_SIRAM_ENTRY_LAST;
+ area->last_entry = addr;
+ }
+ val |= TSA_QE_SIRAM_ENTRY_CNT(cnt - 1);
+
+ tsa_write16(addr, val);
+ addr += 2;
+ left -= cnt;
+ }
+
+ return 0;
+}
+
+static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+{
+ return tsa_is_qe(tsa) ?
+ tsa_qe_add_entry(tsa, area, count, serial_id) :
+ tsa_cpm1_add_entry(tsa, area, count, serial_id);
+}
+
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
u32 tdms, u32 tdm_id, bool is_rx)
{
@@ -399,7 +638,7 @@ static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
}
dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
- tdm_id, route_name, ts, ts+count-1, serial_name);
+ tdm_id, route_name, ts, ts + count - 1, serial_name);
ts += count;
ret = tsa_add_entry(tsa, &area, count, serial_id);
@@ -441,7 +680,6 @@ static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
{
- struct device_node *tdm_np;
struct tsa_tdm *tdm;
struct clk *clk;
u32 tdm_id, val;
@@ -449,14 +687,13 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
int i;
tsa->tdms = 0;
- tsa->tdm[0].is_enable = false;
- tsa->tdm[1].is_enable = false;
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++)
+ tsa->tdm[i].is_enable = false;
- for_each_available_child_of_node(np, tdm_np) {
+ for_each_available_child_of_node_scoped(np, tdm_np) {
ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
if (ret) {
dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
- of_node_put(tdm_np);
return ret;
}
switch (tdm_id) {
@@ -466,19 +703,28 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
case 1:
tsa->tdms |= BIT(TSA_TDMB);
break;
+ case 2:
+ if (!tsa_is_qe(tsa))
+ goto invalid_tdm; /* Not available on CPM1 */
+ tsa->tdms |= BIT(TSA_TDMC);
+ break;
+ case 3:
+ if (!tsa_is_qe(tsa))
+ goto invalid_tdm; /* Not available on CPM1 */
+ tsa->tdms |= BIT(TSA_TDMD);
+ break;
default:
+invalid_tdm:
dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
tdm_id);
- of_node_put(tdm_np);
return -EINVAL;
}
}
- for_each_available_child_of_node(np, tdm_np) {
+ for_each_available_child_of_node_scoped(np, tdm_np) {
ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
if (ret) {
dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
- of_node_put(tdm_np);
return ret;
}
@@ -492,14 +738,12 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
dev_err(tsa->dev,
"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
tdm_np);
- of_node_put(tdm_np);
return ret;
}
if (val > 3) {
dev_err(tsa->dev,
"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
tdm_np, val);
- of_node_put(tdm_np);
return -EINVAL;
}
tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);
@@ -511,14 +755,12 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
dev_err(tsa->dev,
"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
tdm_np);
- of_node_put(tdm_np);
return ret;
}
if (val > 3) {
dev_err(tsa->dev,
"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
tdm_np, val);
- of_node_put(tdm_np);
return -EINVAL;
}
tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);
@@ -532,85 +774,88 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
+ if (tsa_is_qe(tsa) &&
+ of_property_read_bool(tdm_np, "fsl,fsync-active-low"))
+ tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SL;
+
if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
- clk = of_clk_get_by_name(tdm_np, "l1rsync");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rsync" : "l1rsync");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- of_node_put(tdm_np);
goto err;
}
ret = clk_prepare_enable(clk);
if (ret) {
clk_put(clk);
- of_node_put(tdm_np);
goto err;
}
tdm->l1rsync_clk = clk;
- clk = of_clk_get_by_name(tdm_np, "l1rclk");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rclk" : "l1rclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- of_node_put(tdm_np);
goto err;
}
ret = clk_prepare_enable(clk);
if (ret) {
clk_put(clk);
- of_node_put(tdm_np);
goto err;
}
tdm->l1rclk_clk = clk;
if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
- clk = of_clk_get_by_name(tdm_np, "l1tsync");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tsync" : "l1tsync");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- of_node_put(tdm_np);
goto err;
}
ret = clk_prepare_enable(clk);
if (ret) {
clk_put(clk);
- of_node_put(tdm_np);
goto err;
}
tdm->l1tsync_clk = clk;
- clk = of_clk_get_by_name(tdm_np, "l1tclk");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tclk" : "l1tclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- of_node_put(tdm_np);
goto err;
}
ret = clk_prepare_enable(clk);
if (ret) {
clk_put(clk);
- of_node_put(tdm_np);
goto err;
}
tdm->l1tclk_clk = clk;
}
+ if (tsa_is_qe(tsa)) {
+ /*
+ * The starting address for the TSA table must be set.
+ * 512 entries for Tx and 512 entries for Rx are
+ * available for 4 TDMs.
+ * We assign entries equally -> 128 Rx/Tx entries per
+ * TDM. In other words, 4 blocks of 32 entries per TDM.
+ */
+ tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SAD(4 * tdm_id);
+ }
+
ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
- if (ret) {
- of_node_put(tdm_np);
+ if (ret)
goto err;
- }
ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
- if (ret) {
- of_node_put(tdm_np);
+ if (ret)
goto err;
- }
tdm->is_enable = true;
}
return 0;
err:
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
if (tsa->tdm[i].l1rsync_clk) {
clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
clk_put(tsa->tdm[i].l1rsync_clk);
@@ -636,8 +881,87 @@ static void tsa_init_si_ram(struct tsa *tsa)
resource_size_t i;
/* Fill all entries as the last one */
- for (i = 0; i < tsa->si_ram_sz; i += 4)
- tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
+ if (tsa_is_qe(tsa)) {
+ for (i = 0; i < tsa->si_ram_sz; i += 2)
+ tsa_write16(tsa->si_ram + i, TSA_QE_SIRAM_ENTRY_LAST);
+ } else {
+ for (i = 0; i < tsa->si_ram_sz; i += 4)
+ tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST);
+ }
+}
+
+static int tsa_cpm1_setup(struct tsa *tsa)
+{
+ u32 val;
+
+ /* Set SIMODE */
+ val = 0;
+ if (tsa->tdm[0].is_enable)
+ val |= TSA_CPM1_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
+ if (tsa->tdm[1].is_enable)
+ val |= TSA_CPM1_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
+
+ tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SIMODE,
+ TSA_CPM1_SIMODE_TDMA(TSA_CPM1_SIMODE_TDM_MASK) |
+ TSA_CPM1_SIMODE_TDMB(TSA_CPM1_SIMODE_TDM_MASK),
+ val);
+
+ /* Set SIGMR */
+ val = (tsa->tdms == BIT(TSA_TDMA)) ?
+ TSA_CPM1_SIGMR_RDM_STATIC_TDMA : TSA_CPM1_SIGMR_RDM_STATIC_TDMAB;
+ if (tsa->tdms & BIT(TSA_TDMA))
+ val |= TSA_CPM1_SIGMR_ENA;
+ if (tsa->tdms & BIT(TSA_TDMB))
+ val |= TSA_CPM1_SIGMR_ENB;
+ tsa_write8(tsa->si_regs + TSA_CPM1_SIGMR, val);
+
+ return 0;
+}
+
+static int tsa_qe_setup(struct tsa *tsa)
+{
+ unsigned int sixmr;
+ u8 siglmrh = 0;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
+ if (!tsa->tdm[i].is_enable)
+ continue;
+
+ switch (i) {
+ case 0:
+ sixmr = TSA_QE_SIAMR;
+ siglmrh |= TSA_QE_SIGLMRH_ENA;
+ break;
+ case 1:
+ sixmr = TSA_QE_SIBMR;
+ siglmrh |= TSA_QE_SIGLMRH_ENB;
+ break;
+ case 2:
+ sixmr = TSA_QE_SICMR;
+ siglmrh |= TSA_QE_SIGLMRH_ENC;
+ break;
+ case 3:
+ sixmr = TSA_QE_SIDMR;
+ siglmrh |= TSA_QE_SIGLMRH_END;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set SI mode register */
+ tsa_write16(tsa->si_regs + sixmr, tsa->tdm[i].simode_tdm);
+ }
+
+ /* Enable TDMs */
+ tsa_write8(tsa->si_regs + TSA_QE_SIGLMRH, siglmrh);
+
+ return 0;
+}
+
+static int tsa_setup(struct tsa *tsa)
+{
+ return tsa_is_qe(tsa) ? tsa_qe_setup(tsa) : tsa_cpm1_setup(tsa);
}
static int tsa_probe(struct platform_device *pdev)
@@ -646,7 +970,6 @@ static int tsa_probe(struct platform_device *pdev)
struct resource *res;
struct tsa *tsa;
unsigned int i;
- u32 val;
int ret;
tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
@@ -654,6 +977,18 @@ static int tsa_probe(struct platform_device *pdev)
return -ENOMEM;
tsa->dev = &pdev->dev;
+ tsa->version = (enum tsa_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
+ switch (tsa->version) {
+ case TSA_CPM1:
+ dev_info(tsa->dev, "CPM1 version\n");
+ break;
+ case TSA_QE:
+ dev_info(tsa->dev, "QE version\n");
+ break;
+ default:
+ dev_err(tsa->dev, "Unknown version (%d)\n", tsa->version);
+ return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
tsa->serials[i].id = i;
@@ -680,38 +1015,21 @@ static int tsa_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Set SIMODE */
- val = 0;
- if (tsa->tdm[0].is_enable)
- val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
- if (tsa->tdm[1].is_enable)
- val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
-
- tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
- TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
- TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
- val);
-
- /* Set SIGMR */
- val = (tsa->tdms == BIT(TSA_TDMA)) ?
- TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
- if (tsa->tdms & BIT(TSA_TDMA))
- val |= TSA_SIGMR_ENA;
- if (tsa->tdms & BIT(TSA_TDMB))
- val |= TSA_SIGMR_ENB;
- tsa_write8(tsa->si_regs + TSA_SIGMR, val);
+ ret = tsa_setup(tsa);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, tsa);
return 0;
}
-static int tsa_remove(struct platform_device *pdev)
+static void tsa_remove(struct platform_device *pdev)
{
struct tsa *tsa = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
if (tsa->tdm[i].l1rsync_clk) {
clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
clk_put(tsa->tdm[i].l1rsync_clk);
@@ -729,11 +1047,15 @@ static int tsa_remove(struct platform_device *pdev)
clk_put(tsa->tdm[i].l1rclk_clk);
}
}
- return 0;
}
static const struct of_device_id tsa_id_table[] = {
- { .compatible = "fsl,cpm1-tsa" },
+#if IS_ENABLED(CONFIG_CPM1)
+ { .compatible = "fsl,cpm1-tsa", .data = (void *)TSA_CPM1 },
+#endif
+#if IS_ENABLED(CONFIG_QUICC_ENGINE)
+ { .compatible = "fsl,qe-tsa", .data = (void *)TSA_QE },
+#endif
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);
@@ -842,5 +1164,5 @@ struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
-MODULE_DESCRIPTION("CPM TSA driver");
+MODULE_DESCRIPTION("CPM/QE TSA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.h b/drivers/soc/fsl/qe/tsa.h
index d9df89b6da3e..da137bc0f49b 100644
--- a/drivers/soc/fsl/qe/tsa.h
+++ b/drivers/soc/fsl/qe/tsa.h
@@ -39,4 +39,7 @@ struct tsa_serial_info {
/* Get information */
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info);
+/* Get serial number */
+int tsa_serial_get_num(struct tsa_serial *tsa_serial);
+
#endif /* __SOC_FSL_TSA_H__ */
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 21dbcd787cd5..892aa5931d5b 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -114,6 +114,7 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
return 0;
}
+EXPORT_SYMBOL(ucc_mux_set_grant_tsa_bkpt);
int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
enum comm_dir mode)
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index 3d0cae30c769..06bd94b29fb3 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -36,6 +36,7 @@ static void copy_ippdexpcr1_setting(u32 val)
return;
regs = of_iomap(np, 0);
+ of_node_put(np);
if (!regs)
return;
diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c
index 524fbfeb94e3..76cb0b6a221c 100644
--- a/drivers/soc/fujitsu/a64fx-diag.c
+++ b/drivers/soc/fujitsu/a64fx-diag.c
@@ -116,7 +116,7 @@ static int a64fx_diag_probe(struct platform_device *pdev)
return 0;
}
-static int a64fx_diag_remove(struct platform_device *pdev)
+static void a64fx_diag_remove(struct platform_device *pdev)
{
struct a64fx_diag_priv *priv = platform_get_drvdata(pdev);
@@ -127,8 +127,6 @@ static int a64fx_diag_remove(struct platform_device *pdev)
free_nmi(priv->irq, NULL);
else
free_irq(priv->irq, NULL);
-
- return 0;
}
static const struct acpi_device_id a64fx_diag_acpi_match[] = {
diff --git a/drivers/soc/hisilicon/Kconfig b/drivers/soc/hisilicon/Kconfig
index 0ab688af308f..6d7c244d2e78 100644
--- a/drivers/soc/hisilicon/Kconfig
+++ b/drivers/soc/hisilicon/Kconfig
@@ -6,16 +6,19 @@ menu "Hisilicon SoC drivers"
config KUNPENG_HCCS
tristate "HCCS driver on Kunpeng SoC"
depends on ACPI
- depends on MAILBOX
+ depends on PCC
depends on ARM64 || COMPILE_TEST
help
The Huawei Cache Coherence System (HCCS) is a multi-chip
interconnection bus protocol.
The performance of application may be affected if some HCCS
ports are not in full lane status, have a large number of CRC
- errors and so on.
+ errors and so on. It may also help reduce system power
+ consumption if the platform has HCCS ports that support the
+ low power feature.
Say M here if you want to include support for querying the
- health status and port information of HCCS on Kunpeng SoC.
+ health status and port information of HCCS, or reducing system
+ power consumption on Kunpeng SoC.
endmenu
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index f3810d9d1caa..006fec47ea10 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -21,20 +21,27 @@
* - if all enabled ports are in linked
* - if all linked ports are in full lane
* - CRC error count sum
+ *
+ * - Retrieve all HCCS types used on the platform.
+ *
+ * - Support the low power feature for all ports of a specified HCCS type,
+ * and provide the following interfaces:
+ * - query which HCCS types support increasing and decreasing the lane number.
+ * - decrease the lane number of all ports of a specified HCCS type when idle.
+ * - increase the lane number of all ports of a specified HCCS type.
*/
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
+#include <linux/stringify.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
#include <acpi/pcc.h>
#include "kunpeng_hccs.h"
-/* PCC defines */
-#define HCCS_PCC_SIGNATURE_MASK 0x50434300
-#define HCCS_PCC_STATUS_CMD_COMPLETE BIT(0)
-
/*
* Arbitrary retries in case the remote processor is slow to respond
* to PCC commands
@@ -57,6 +64,42 @@ static struct hccs_chip_info *kobj_to_chip_info(struct kobject *k)
return container_of(k, struct hccs_chip_info, kobj);
}
+static struct hccs_dev *device_kobj_to_hccs_dev(struct kobject *k)
+{
+ struct device *dev = container_of(k, struct device, kobj);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+
+ return platform_get_drvdata(pdev);
+}
+
+static char *hccs_port_type_to_name(struct hccs_dev *hdev, u8 type)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->used_type_num; i++) {
+ if (hdev->type_name_maps[i].type == type)
+ return hdev->type_name_maps[i].name;
+ }
+
+ return NULL;
+}
+
+static int hccs_name_to_port_type(struct hccs_dev *hdev,
+ const char *name, u8 *type)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->used_type_num; i++) {
+ if (strcmp(hdev->type_name_maps[i].name, name) == 0) {
+ *type = hdev->type_name_maps[i].type;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
struct hccs_register_ctx {
struct device *dev;
u8 chan_id;
@@ -89,8 +132,10 @@ static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
struct hccs_register_ctx ctx = {0};
acpi_status status;
- if (!acpi_has_method(handle, METHOD_NAME__CRS))
+ if (!acpi_has_method(handle, METHOD_NAME__CRS)) {
+ dev_err(hdev->dev, "No _CRS method.\n");
return -ENODEV;
+ }
ctx.dev = hdev->dev;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
@@ -112,12 +157,16 @@ static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
*(u8 *)msg, ret);
}
-static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
+static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
{
- struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+ struct hccs_mbox_client_info *cl_info =
+ container_of(cl, struct hccs_mbox_client_info, client);
+
+ complete(&cl_info->done);
+}
- if (cl_info->pcc_comm_addr)
- iounmap(cl_info->pcc_comm_addr);
+static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
+{
pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
}
@@ -126,6 +175,7 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct mbox_client *cl = &cl_info->client;
struct pcc_mbox_chan *pcc_chan;
+ struct mbox_chan *mbox_chan;
struct device *dev = hdev->dev;
int rc;
@@ -133,14 +183,17 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
cl->tx_block = false;
cl->knows_txdone = true;
cl->tx_done = hccs_chan_tx_done;
+ cl->rx_callback = hdev->verspec_data->rx_callback;
+ init_completion(&cl_info->done);
+
pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
if (IS_ERR(pcc_chan)) {
- dev_err(dev, "PPC channel request failed.\n");
+ dev_err(dev, "PCC channel request failed.\n");
rc = -ENODEV;
goto out;
}
cl_info->pcc_chan = pcc_chan;
- cl_info->mbox_chan = pcc_chan->mchan;
+ mbox_chan = pcc_chan->mchan;
/*
* pcc_chan->latency is just a nominal value. In reality the remote
@@ -149,21 +202,23 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
*/
cl_info->deadline_us =
HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
- if (cl_info->mbox_chan->mbox->txdone_irq) {
+ if (!hdev->verspec_data->has_txdone_irq &&
+ mbox_chan->mbox->txdone_irq) {
dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
rc = -EINVAL;
goto err_mbx_channel_free;
+ } else if (hdev->verspec_data->has_txdone_irq &&
+ !mbox_chan->mbox->txdone_irq) {
+ dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
+ rc = -EINVAL;
+ goto err_mbx_channel_free;
}
- if (pcc_chan->shmem_base_addr) {
- cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!cl_info->pcc_comm_addr) {
- dev_err(dev, "Failed to ioremap PCC communication region for channel-%d.\n",
- hdev->chan_id);
- rc = -ENOMEM;
- goto err_mbx_channel_free;
- }
+ if (pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
+ dev_err(dev, "Base size (%llu) of PCC communication region must be %d bytes.\n",
+ pcc_chan->shmem_size, HCCS_PCC_SHARE_MEM_BYTES);
+ rc = -EINVAL;
+ goto err_mbx_channel_free;
}
return 0;
@@ -174,11 +229,11 @@ out:
return rc;
}
-static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
+static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
{
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct acpi_pcct_shared_memory __iomem *comm_base =
- cl_info->pcc_comm_addr;
+ cl_info->pcc_chan->shmem;
u16 status;
int ret;
@@ -187,7 +242,7 @@ static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
* deadline_us(timeout_us) until PCC command complete bit is set(cond)
*/
ret = readw_poll_timeout(&comm_base->status, status,
- status & HCCS_PCC_STATUS_CMD_COMPLETE,
+ status & PCC_STATUS_CMD_COMPLETE,
HCCS_POLL_STATUS_TIME_INTERVAL_US,
cl_info->deadline_us);
if (unlikely(ret))
@@ -196,46 +251,90 @@ static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
return ret;
}
+static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev)
+{
+ struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+
+ if (!wait_for_completion_timeout(&cl_info->done,
+ usecs_to_jiffies(cl_info->deadline_us))) {
+ dev_err(hdev->dev, "PCC command executed timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
+ u8 cmd,
+ struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size)
+{
+ struct acpi_pcct_shared_memory tmp = {
+ .signature = PCC_SIGNATURE | hdev->chan_id,
+ .command = cmd,
+ .status = 0,
+ };
+
+ memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
+ sizeof(struct acpi_pcct_shared_memory));
+
+ /* Copy the message to the PCC comm space */
+ memcpy_toio(comm_space, (void *)desc, space_size);
+}
+
+static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
+ u8 cmd,
+ struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size)
+{
+ struct acpi_pcct_ext_pcc_shared_memory tmp = {
+ .signature = PCC_SIGNATURE | hdev->chan_id,
+ .flags = PCC_CMD_COMPLETION_NOTIFY,
+ .length = HCCS_PCC_SHARE_MEM_BYTES,
+ .command = cmd,
+ };
+
+ memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
+ sizeof(struct acpi_pcct_ext_pcc_shared_memory));
+
+ /* Copy the message to the PCC comm space */
+ memcpy_toio(comm_space, (void *)desc, space_size);
+}
+
static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
struct hccs_desc *desc)
{
+ const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
- void __iomem *comm_space = cl_info->pcc_comm_addr +
- sizeof(struct acpi_pcct_shared_memory);
+ struct mbox_chan *mbox_chan = cl_info->pcc_chan->mchan;
struct hccs_fw_inner_head *fw_inner_head;
- struct acpi_pcct_shared_memory tmp = {0};
- u16 comm_space_size;
+ void __iomem *comm_space;
+ u16 space_size;
int ret;
- /* Write signature for this subspace */
- tmp.signature = HCCS_PCC_SIGNATURE_MASK | hdev->chan_id;
- /* Write to the shared command region */
- tmp.command = cmd;
- /* Clear cmd complete bit */
- tmp.status = 0;
- memcpy_toio(cl_info->pcc_comm_addr, (void *)&tmp,
- sizeof(struct acpi_pcct_shared_memory));
-
- /* Copy the message to the PCC comm space */
- comm_space_size = HCCS_PCC_SHARE_MEM_BYTES -
- sizeof(struct acpi_pcct_shared_memory);
- memcpy_toio(comm_space, (void *)desc, comm_space_size);
+ comm_space = cl_info->pcc_chan->shmem + verspec_data->shared_mem_size;
+ space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
+ verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
+ comm_space, space_size);
+ if (verspec_data->has_txdone_irq)
+ reinit_completion(&cl_info->done);
/* Ring doorbell */
- ret = mbox_send_message(cl_info->mbox_chan, &cmd);
+ ret = mbox_send_message(mbox_chan, &cmd);
if (ret < 0) {
dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
ret);
goto end;
}
- /* Wait for completion */
- ret = hccs_check_chan_cmd_complete(hdev);
+ ret = verspec_data->wait_cmd_complete(hdev);
if (ret)
goto end;
/* Copy response data */
- memcpy_fromio((void *)desc, comm_space, comm_space_size);
+ memcpy_fromio((void *)desc, comm_space, space_size);
fw_inner_head = &desc->rsp.fw_inner_head;
if (fw_inner_head->retStatus) {
dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n",
@@ -244,7 +343,10 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
}
end:
- mbox_client_txdone(cl_info->mbox_chan, ret);
+ if (verspec_data->has_txdone_irq)
+ mbox_chan_txdone(mbox_chan, ret);
+ else
+ mbox_client_txdone(mbox_chan, ret);
return ret;
}
@@ -390,6 +492,7 @@ static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
struct device *dev = hdev->dev;
struct hccs_chip_info *chip;
struct hccs_die_info *die;
+ bool has_die_info = false;
u8 i, j;
int ret;
@@ -398,6 +501,7 @@ static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
if (!chip->die_num)
continue;
+ has_die_info = true;
chip->dies = devm_kzalloc(hdev->dev,
chip->die_num * sizeof(struct hccs_die_info),
GFP_KERNEL);
@@ -419,7 +523,7 @@ static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
}
}
- return 0;
+ return has_die_info ? 0 : -EINVAL;
}
static int hccs_get_bd_info(struct hccs_dev *hdev, u8 opcode,
@@ -495,6 +599,12 @@ static int hccs_get_all_port_attr(struct hccs_dev *hdev,
start_id = rsp_head.next_id;
}
+ if (left_buf_len != 0) {
+ dev_err(hdev->dev, "failed to get the expected port number(%u) attribute.\n",
+ size);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -519,7 +629,7 @@ static int hccs_get_all_port_info_on_die(struct hccs_dev *hdev,
port = &die->ports[i];
port->port_id = attrs[i].port_id;
port->port_type = attrs[i].port_type;
- port->lane_mode = attrs[i].lane_mode;
+ port->max_lane_num = attrs[i].max_lane_num;
port->enable = attrs[i].enable;
port->die = die;
}
@@ -531,10 +641,10 @@ out:
static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
{
-
struct device *dev = hdev->dev;
struct hccs_chip_info *chip;
struct hccs_die_info *die;
+ bool has_port_info = false;
u8 i, j;
int ret;
@@ -545,6 +655,7 @@ static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
if (!die->port_num)
continue;
+ has_port_info = true;
die->ports = devm_kzalloc(dev,
die->port_num * sizeof(struct hccs_port_info),
GFP_KERNEL);
@@ -563,7 +674,7 @@ static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
}
}
- return 0;
+ return has_port_info ? 0 : -EINVAL;
}
static int hccs_get_hw_info(struct hccs_dev *hdev)
@@ -594,6 +705,55 @@ static int hccs_get_hw_info(struct hccs_dev *hdev)
return 0;
}
+static u16 hccs_calc_used_type_num(struct hccs_dev *hdev,
+ unsigned long *hccs_ver)
+{
+ struct hccs_chip_info *chip;
+ struct hccs_port_info *port;
+ struct hccs_die_info *die;
+ u16 used_type_num = 0;
+ u16 i, j, k;
+
+ for (i = 0; i < hdev->chip_num; i++) {
+ chip = &hdev->chips[i];
+ for (j = 0; j < chip->die_num; j++) {
+ die = &chip->dies[j];
+ for (k = 0; k < die->port_num; k++) {
+ port = &die->ports[k];
+ set_bit(port->port_type, hccs_ver);
+ }
+ }
+ }
+
+ for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1)
+ used_type_num++;
+
+ return used_type_num;
+}
+
+static int hccs_init_type_name_maps(struct hccs_dev *hdev)
+{
+ DECLARE_BITMAP(hccs_ver, HCCS_IP_MAX + 1) = {};
+ unsigned int i;
+ u16 idx = 0;
+
+ hdev->used_type_num = hccs_calc_used_type_num(hdev, hccs_ver);
+ hdev->type_name_maps = devm_kcalloc(hdev->dev, hdev->used_type_num,
+ sizeof(struct hccs_type_name_map),
+ GFP_KERNEL);
+ if (!hdev->type_name_maps)
+ return -ENOMEM;
+
+ for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1) {
+ hdev->type_name_maps[idx].type = i;
+ sprintf(hdev->type_name_maps[idx].name,
+ "%s%u", HCCS_IP_PREFIX, i);
+ idx++;
+ }
+
+ return 0;
+}
+
static int hccs_query_port_link_status(struct hccs_dev *hdev,
const struct hccs_port_info *port,
struct hccs_link_status *link_status)
@@ -754,7 +914,7 @@ static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
{
const struct hccs_port_info *port = kobj_to_port_info(kobj);
- return sysfs_emit(buf, "HCCS-v%u\n", port->port_type);
+ return sysfs_emit(buf, "%s%u\n", HCCS_IP_PREFIX, port->port_type);
}
static struct kobj_attribute hccs_type_attr = __ATTR_RO(type);
@@ -763,7 +923,7 @@ static ssize_t lane_mode_show(struct kobject *kobj, struct kobj_attribute *attr,
{
const struct hccs_port_info *port = kobj_to_port_info(kobj);
- return sysfs_emit(buf, "x%u\n", port->lane_mode);
+ return sysfs_emit(buf, "x%u\n", port->max_lane_num);
}
static struct kobj_attribute lane_mode_attr = __ATTR_RO(lane_mode);
@@ -1058,6 +1218,372 @@ static const struct kobj_type hccs_chip_type = {
.default_groups = hccs_chip_default_groups,
};
+static int hccs_parse_pm_port_type(struct hccs_dev *hdev, const char *buf,
+ u8 *port_type)
+{
+ char hccs_name[HCCS_NAME_MAX_LEN + 1] = "";
+ u8 type;
+ int ret;
+
+ ret = sscanf(buf, "%" __stringify(HCCS_NAME_MAX_LEN) "s", hccs_name);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = hccs_name_to_port_type(hdev, hccs_name, &type);
+ if (ret) {
+ dev_dbg(hdev->dev, "input invalid, please get the available types from 'used_types'.\n");
+ return ret;
+ }
+
+ if (type == HCCS_V2 && hdev->caps & HCCS_CAPS_HCCS_V2_PM) {
+ *port_type = type;
+ return 0;
+ }
+
+ dev_dbg(hdev->dev, "%s doesn't support for increasing and decreasing lane.\n",
+ hccs_name);
+
+ return -EOPNOTSUPP;
+}
+
+static int hccs_query_port_idle_status(struct hccs_dev *hdev,
+ struct hccs_port_info *port, u8 *idle)
+{
+ const struct hccs_die_info *die = port->die;
+ const struct hccs_chip_info *chip = die->chip;
+ struct hccs_port_comm_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_port_comm_req_param *)desc.req.data;
+ req_param->chip_id = chip->chip_id;
+ req_param->die_id = die->die_id;
+ req_param->port_id = port->port_id;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_IDLE_STATUS, &desc);
+ if (ret) {
+ dev_err(hdev->dev,
+ "get port idle status failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *idle = *((u8 *)desc.rsp.data);
+ return 0;
+}
+
+static int hccs_get_all_spec_port_idle_sta(struct hccs_dev *hdev, u8 port_type,
+ bool *all_idle)
+{
+ struct hccs_chip_info *chip;
+ struct hccs_port_info *port;
+ struct hccs_die_info *die;
+ int ret = 0;
+ u8 i, j, k;
+ u8 idle;
+
+ *all_idle = false;
+ for (i = 0; i < hdev->chip_num; i++) {
+ chip = &hdev->chips[i];
+ for (j = 0; j < chip->die_num; j++) {
+ die = &chip->dies[j];
+ for (k = 0; k < die->port_num; k++) {
+ port = &die->ports[k];
+ if (port->port_type != port_type)
+ continue;
+ ret = hccs_query_port_idle_status(hdev, port,
+ &idle);
+ if (ret) {
+ dev_err(hdev->dev,
+ "hccs%u on chip%u/die%u get idle status failed, ret = %d.\n",
+ port->port_id, chip->chip_id, die->die_id, ret);
+ return ret;
+ } else if (idle == 0) {
+ dev_info(hdev->dev, "hccs%u on chip%u/die%u is busy.\n",
+ port->port_id, chip->chip_id, die->die_id);
+ return 0;
+ }
+ }
+ }
+ }
+ *all_idle = true;
+
+ return 0;
+}
+
+static int hccs_get_all_spec_port_full_lane_sta(struct hccs_dev *hdev,
+ u8 port_type, bool *full_lane)
+{
+ struct hccs_link_status status = {0};
+ struct hccs_chip_info *chip;
+ struct hccs_port_info *port;
+ struct hccs_die_info *die;
+ u8 i, j, k;
+ int ret;
+
+ *full_lane = false;
+ for (i = 0; i < hdev->chip_num; i++) {
+ chip = &hdev->chips[i];
+ for (j = 0; j < chip->die_num; j++) {
+ die = &chip->dies[j];
+ for (k = 0; k < die->port_num; k++) {
+ port = &die->ports[k];
+ if (port->port_type != port_type)
+ continue;
+ ret = hccs_query_port_link_status(hdev, port,
+ &status);
+ if (ret)
+ return ret;
+ if (status.lane_num != port->max_lane_num)
+ return 0;
+ }
+ }
+ }
+ *full_lane = true;
+
+ return 0;
+}
+
+static int hccs_prepare_inc_lane(struct hccs_dev *hdev, u8 type)
+{
+ struct hccs_inc_lane_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
+ req_param->port_type = type;
+ req_param->opt_type = HCCS_PREPARE_INC_LANE;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
+ if (ret)
+ dev_err(hdev->dev, "prepare for increasing lane failed, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hccs_wait_serdes_adapt_completed(struct hccs_dev *hdev, u8 type)
+{
+#define HCCS_MAX_WAIT_CNT_FOR_ADAPT 10
+#define HCCS_QUERY_ADAPT_RES_DELAY_MS 100
+#define HCCS_SERDES_ADAPT_OK 0
+
+ struct hccs_inc_lane_req_param *req_param;
+ u8 wait_cnt = HCCS_MAX_WAIT_CNT_FOR_ADAPT;
+ struct hccs_desc desc;
+ u8 adapt_res;
+ int ret;
+
+ do {
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
+ req_param->port_type = type;
+ req_param->opt_type = HCCS_GET_ADAPT_RES;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
+ if (ret) {
+ dev_err(hdev->dev, "query adapting result failed, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ adapt_res = *((u8 *)&desc.rsp.data);
+ if (adapt_res == HCCS_SERDES_ADAPT_OK)
+ return 0;
+
+ msleep(HCCS_QUERY_ADAPT_RES_DELAY_MS);
+ } while (--wait_cnt);
+
+ dev_err(hdev->dev, "wait for adapting completed timeout.\n");
+
+ return -ETIMEDOUT;
+}
+
+static int hccs_start_hpcs_retraining(struct hccs_dev *hdev, u8 type)
+{
+ struct hccs_inc_lane_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
+ req_param->port_type = type;
+ req_param->opt_type = HCCS_START_RETRAINING;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
+ if (ret)
+ dev_err(hdev->dev, "start hpcs retraining failed, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hccs_start_inc_lane(struct hccs_dev *hdev, u8 type)
+{
+ int ret;
+
+ ret = hccs_prepare_inc_lane(hdev, type);
+ if (ret)
+ return ret;
+
+ ret = hccs_wait_serdes_adapt_completed(hdev, type);
+ if (ret)
+ return ret;
+
+ return hccs_start_hpcs_retraining(hdev, type);
+}
+
+static int hccs_start_dec_lane(struct hccs_dev *hdev, u8 type)
+{
+ struct hccs_desc desc;
+ u8 *port_type;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ port_type = (u8 *)desc.req.data;
+ *port_type = type;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_PM_DEC_LANE, &desc);
+ if (ret)
+ dev_err(hdev->dev, "start to decrease lane failed, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static ssize_t dec_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
+ bool all_in_idle;
+ u8 port_type;
+ int ret;
+
+ ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
+ if (ret)
+ return ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_get_all_spec_port_idle_sta(hdev, port_type, &all_in_idle);
+ if (ret)
+ goto out;
+ if (!all_in_idle) {
+ ret = -EBUSY;
+ dev_err(hdev->dev, "please don't decrease lanes on high load with %s, ret = %d.\n",
+ hccs_port_type_to_name(hdev, port_type), ret);
+ goto out;
+ }
+
+ ret = hccs_start_dec_lane(hdev, port_type);
+out:
+ mutex_unlock(&hdev->lock);
+
+ return ret == 0 ? count : ret;
+}
+static struct kobj_attribute dec_lane_of_type_attr =
+ __ATTR(dec_lane_of_type, 0200, NULL, dec_lane_of_type_store);
+
+static ssize_t inc_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
+ bool full_lane;
+ u8 port_type;
+ int ret;
+
+ ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
+ if (ret)
+ return ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_get_all_spec_port_full_lane_sta(hdev, port_type, &full_lane);
+ if (ret || full_lane)
+ goto out;
+
+ ret = hccs_start_inc_lane(hdev, port_type);
+out:
+ mutex_unlock(&hdev->lock);
+ return ret == 0 ? count : ret;
+}
+static struct kobj_attribute inc_lane_of_type_attr =
+ __ATTR(inc_lane_of_type, 0200, NULL, inc_lane_of_type_store);
+
+static ssize_t available_inc_dec_lane_types_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
+
+ if (hdev->caps & HCCS_CAPS_HCCS_V2_PM)
+ return sysfs_emit(buf, "%s\n",
+ hccs_port_type_to_name(hdev, HCCS_V2));
+
+ return -EINVAL;
+}
+static struct kobj_attribute available_inc_dec_lane_types_attr =
+ __ATTR(available_inc_dec_lane_types, 0444,
+ available_inc_dec_lane_types_show, NULL);
+
+static ssize_t used_types_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
+ int len = 0;
+ u16 i;
+
+ for (i = 0; i < hdev->used_type_num - 1; i++)
+ len += sysfs_emit_at(buf, len, "%s ", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s\n", hdev->type_name_maps[i].name);
+
+ return len;
+}
+static struct kobj_attribute used_types_attr =
+ __ATTR(used_types, 0444, used_types_show, NULL);
+
+static void hccs_remove_misc_sysfs(struct hccs_dev *hdev)
+{
+ sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);
+
+ if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
+ return;
+
+ sysfs_remove_file(&hdev->dev->kobj,
+ &available_inc_dec_lane_types_attr.attr);
+ sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
+ sysfs_remove_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
+}
+
+static int hccs_add_misc_sysfs(struct hccs_dev *hdev)
+{
+ int ret;
+
+ ret = sysfs_create_file(&hdev->dev->kobj, &used_types_attr.attr);
+ if (ret)
+ return ret;
+
+ if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
+ return 0;
+
+ ret = sysfs_create_file(&hdev->dev->kobj,
+ &available_inc_dec_lane_types_attr.attr);
+ if (ret)
+ goto used_types_remove;
+
+ ret = sysfs_create_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
+ if (ret)
+ goto inc_dec_lane_types_remove;
+
+ ret = sysfs_create_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
+ if (ret)
+ goto dec_lane_of_type_remove;
+
+ return 0;
+
+dec_lane_of_type_remove:
+ sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
+inc_dec_lane_types_remove:
+ sysfs_remove_file(&hdev->dev->kobj,
+ &available_inc_dec_lane_types_attr.attr);
+used_types_remove:
+ sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);
+ return ret;
+}
+
static void hccs_remove_die_dir(struct hccs_die_info *die)
{
struct hccs_port_info *port;
@@ -1092,6 +1618,8 @@ static void hccs_remove_topo_dirs(struct hccs_dev *hdev)
for (i = 0; i < hdev->chip_num; i++)
hccs_remove_chip_dir(&hdev->chips[i]);
+
+ hccs_remove_misc_sysfs(hdev);
}
static int hccs_create_hccs_dir(struct hccs_dev *hdev,
@@ -1101,7 +1629,7 @@ static int hccs_create_hccs_dir(struct hccs_dev *hdev,
int ret;
ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
- &die->kobj, "hccs%d", port->port_id);
+ &die->kobj, "hccs%u", port->port_id);
if (ret) {
kobject_put(&port->kobj);
return ret;
@@ -1119,7 +1647,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev,
u16 i;
ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
- &chip->kobj, "die%d", die->die_id);
+ &chip->kobj, "die%u", die->die_id);
if (ret) {
kobject_put(&die->kobj);
return ret;
@@ -1129,7 +1657,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev,
port = &die->ports[i];
ret = hccs_create_hccs_dir(hdev, die, port);
if (ret) {
- dev_err(hdev->dev, "create hccs%d dir failed.\n",
+ dev_err(hdev->dev, "create hccs%u dir failed.\n",
port->port_id);
goto err;
}
@@ -1151,7 +1679,7 @@ static int hccs_create_chip_dir(struct hccs_dev *hdev,
u16 id;
ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
- &hdev->dev->kobj, "chip%d", chip->chip_id);
+ &hdev->dev->kobj, "chip%u", chip->chip_id);
if (ret) {
kobject_put(&chip->kobj);
return ret;
@@ -1182,11 +1710,17 @@ static int hccs_create_topo_dirs(struct hccs_dev *hdev)
chip = &hdev->chips[id];
ret = hccs_create_chip_dir(hdev, chip);
if (ret) {
- dev_err(hdev->dev, "init chip%d dir failed!\n", id);
+ dev_err(hdev->dev, "init chip%u dir failed!\n", id);
goto err;
}
}
+ ret = hccs_add_misc_sysfs(hdev);
+ if (ret) {
+ dev_err(hdev->dev, "create misc sysfs interface failed, ret = %d\n", ret);
+ goto err;
+ }
+
return 0;
err:
for (k = 0; k < id; k++)
@@ -1216,6 +1750,11 @@ static int hccs_probe(struct platform_device *pdev)
hdev->dev = &pdev->dev;
platform_set_drvdata(pdev, hdev);
+ /*
+ * This can never fail because the driver and the device have already been matched.
+ */
+ hdev->verspec_data = acpi_device_get_match_data(hdev->dev);
+
mutex_init(&hdev->lock);
rc = hccs_get_pcc_chan_id(hdev);
if (rc)
@@ -1232,6 +1771,10 @@ static int hccs_probe(struct platform_device *pdev)
if (rc)
goto unregister_pcc_chan;
+ rc = hccs_init_type_name_maps(hdev);
+ if (rc)
+ goto unregister_pcc_chan;
+
rc = hccs_create_topo_dirs(hdev);
if (rc)
goto unregister_pcc_chan;
@@ -1244,19 +1787,34 @@ unregister_pcc_chan:
return rc;
}
-static int hccs_remove(struct platform_device *pdev)
+static void hccs_remove(struct platform_device *pdev)
{
struct hccs_dev *hdev = platform_get_drvdata(pdev);
hccs_remove_topo_dirs(hdev);
hccs_unregister_pcc_channel(hdev);
-
- return 0;
}
+static const struct hccs_verspecific_data hisi04b1_verspec_data = {
+ .rx_callback = NULL,
+ .wait_cmd_complete = hccs_wait_cmd_complete_by_poll,
+ .fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region,
+ .shared_mem_size = sizeof(struct acpi_pcct_shared_memory),
+ .has_txdone_irq = false,
+};
+
+static const struct hccs_verspecific_data hisi04b2_verspec_data = {
+ .rx_callback = hccs_pcc_rx_callback,
+ .wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
+ .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
+ .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
+ .has_txdone_irq = true,
+};
+
static const struct acpi_device_id hccs_acpi_match[] = {
- { "HISI04B1"},
- { ""},
+ { "HISI04B1", (unsigned long)&hisi04b1_verspec_data},
+ { "HISI04B2", (unsigned long)&hisi04b2_verspec_data},
+ { }
};
MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);
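The hunk above wires four new sysfs attributes (used_types, available_inc_dec_lane_types, dec_lane_of_type, inc_lane_of_type) onto the HCCS platform device. The userspace snippet below is illustration only and not part of the patch; the sysfs path is a hypothetical example, since the real location depends on how the ACPI device is enumerated on a given board.

/* Userspace sketch of driving the lane-management attributes added above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical path; adjust to the actual ACPI platform device name. */
#define HCCS_SYSFS "/sys/devices/platform/HISI04B2:00"

int main(void)
{
	char types[64] = "";
	int fd;

	/* List the HCCS types present on the platform, e.g. "HCCS-v2". */
	fd = open(HCCS_SYSFS "/used_types", O_RDONLY);
	if (fd < 0 || read(fd, types, sizeof(types) - 1) < 0) {
		perror("used_types");
		return 1;
	}
	close(fd);
	printf("used types: %s", types);

	/* Ask the driver to reduce the lane count of all idle HCCS-v2 ports. */
	fd = open(HCCS_SYSFS "/dec_lane_of_type", O_WRONLY);
	if (fd < 0 || write(fd, "HCCS-v2", strlen("HCCS-v2")) < 0) {
		perror("dec_lane_of_type");
		return 1;
	}
	close(fd);

	return 0;
}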
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h
index 6012d2776028..f0a9a5618d97 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.h
+++ b/drivers/soc/hisilicon/kunpeng_hccs.h
@@ -10,6 +10,19 @@
* | P0 | P1 | P2 | P3 | P0 | P1 | P2 | P3 | P0 | P1 | P2 | P3 |P0 | P1 | P2 | P3 |
*/
+enum hccs_port_type {
+ HCCS_V1 = 1,
+ HCCS_V2,
+};
+
+#define HCCS_IP_PREFIX "HCCS-v"
+#define HCCS_IP_MAX 255
+#define HCCS_NAME_MAX_LEN 9
+struct hccs_type_name_map {
+ u8 type;
+ char name[HCCS_NAME_MAX_LEN + 1];
+};
+
/*
* This value cannot be 255, otherwise the loop of the multi-BD communication
* case cannot end.
@@ -19,7 +32,7 @@
struct hccs_port_info {
u8 port_id;
u8 port_type;
- u8 lane_mode;
+ u8 max_lane_num;
bool enable; /* if the port is enabled */
struct kobject kobj;
bool dir_created;
@@ -47,18 +60,36 @@ struct hccs_chip_info {
struct hccs_mbox_client_info {
struct mbox_client client;
- struct mbox_chan *mbox_chan;
struct pcc_mbox_chan *pcc_chan;
u64 deadline_us;
- void __iomem *pcc_comm_addr;
+ struct completion done;
};
+struct hccs_desc;
+
+struct hccs_verspecific_data {
+ void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ int (*wait_cmd_complete)(struct hccs_dev *hdev);
+ void (*fill_pcc_shared_mem)(struct hccs_dev *hdev,
+ u8 cmd, struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size);
+ u16 shared_mem_size;
+ bool has_txdone_irq;
+};
+
+#define HCCS_CAPS_HCCS_V2_PM BIT_ULL(0)
+
struct hccs_dev {
struct device *dev;
struct acpi_device *acpi_dev;
+ const struct hccs_verspecific_data *verspec_data;
+ /* device capabilities from firmware, like HCCS_CAPS_xxx. */
u64 caps;
u8 chip_num;
struct hccs_chip_info *chips;
+ u16 used_type_num;
+ struct hccs_type_name_map *type_name_maps;
u8 chan_id;
struct mutex lock;
struct hccs_mbox_client_info cl_info;
@@ -76,6 +107,9 @@ enum hccs_subcmd_type {
HCCS_GET_DIE_PORTS_LANE_STA,
HCCS_GET_DIE_PORTS_LINK_STA,
HCCS_GET_DIE_PORTS_CRC_ERR_CNT,
+ HCCS_GET_PORT_IDLE_STATUS,
+ HCCS_PM_DEC_LANE,
+ HCCS_PM_INC_LANE,
HCCS_SUB_CMD_MAX = 255,
};
@@ -98,7 +132,7 @@ struct hccs_die_info_rsp_data {
struct hccs_port_attr {
u8 port_id;
u8 port_type;
- u8 lane_mode;
+ u8 max_lane_num;
u8 enable : 1; /* if the port is enabled */
u16 rsv[2];
};
@@ -119,6 +153,14 @@ struct hccs_port_comm_req_param {
u8 port_id;
};
+#define HCCS_PREPARE_INC_LANE 1
+#define HCCS_GET_ADAPT_RES 2
+#define HCCS_START_RETRAINING 3
+struct hccs_inc_lane_req_param {
+ u8 port_type;
+ u8 opt_type;
+};
+
#define HCCS_PORT_RESET 1
#define HCCS_PORT_SETUP 2
#define HCCS_PORT_CONFIG 3
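HCCS_NAME_MAX_LEN above is also pasted into the sscanf() format of hccs_parse_pm_port_type() via __stringify(), so the parsed type name can never overflow the stack buffer. A minimal standalone sketch of that idiom, re-implementing the kernel's __stringify() in user space purely so the example compiles on its own:

#include <stdio.h>

/* User-space stand-ins for linux/stringify.h (assumption for a self-contained demo). */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)
#define HCCS_NAME_MAX_LEN	9

int main(void)
{
	char hccs_name[HCCS_NAME_MAX_LEN + 1] = "";

	/* The format expands to "%9s": at most 9 characters plus the NUL. */
	if (sscanf("HCCS-v2\n", "%" __stringify(HCCS_NAME_MAX_LEN) "s", hccs_name) == 1)
		printf("parsed type name: %s\n", hccs_name);

	return 0;
}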
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 76a4593baf0a..2a90ddd20104 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -1,14 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "i.MX SoC drivers"
-config IMX_GPCV2_PM_DOMAINS
- bool "i.MX GPCv2 PM domains"
- depends on ARCH_MXC || (COMPILE_TEST && OF)
- depends on PM
- select PM_GENERIC_DOMAINS
- select REGMAP_MMIO
- default y if SOC_IMX7D
-
config SOC_IMX8M
tristate "i.MX8M SoC family support"
depends on ARCH_MXC || COMPILE_TEST
@@ -28,15 +20,4 @@ config SOC_IMX9
help
If you say yes here, you get support for the NXP i.MX9 family
-config IMX8M_BLK_CTRL
- bool
- default SOC_IMX8M && IMX_GPCV2_PM_DOMAINS
- depends on PM_GENERIC_DOMAINS
- depends on COMMON_CLK
-
-config IMX9_BLK_CTRL
- bool
- default SOC_IMX9 && IMX_GPCV2_PM_DOMAINS
- depends on PM_GENERIC_DOMAINS
-
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 3ad321ca608a..ca6a5fa1618f 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -3,4 +3,4 @@ ifeq ($(CONFIG_ARM),y)
obj-$(CONFIG_ARCH_MXC) += soc-imx.o
endif
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
-obj-$(CONFIG_SOC_IMX9) += imx93-src.o
+obj-$(CONFIG_SOC_IMX9) += imx93-src.o soc-imx9.o
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 1dcd243df567..04a1b60f2f2b 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -24,16 +24,22 @@
#define OCOTP_UID_HIGH 0x420
#define IMX8MP_OCOTP_UID_OFFSET 0x10
+#define IMX8MP_OCOTP_UID_HIGH 0xE00
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
struct imx8_soc_data {
char *name;
- u32 (*soc_revision)(void);
+ const char *ocotp_compatible;
+ int (*soc_revision)(struct platform_device *pdev, u32 *socrev);
+ int (*soc_uid)(struct platform_device *pdev, u64 *socuid);
};
-static u64 soc_uid;
+struct imx8_soc_drvdata {
+ void __iomem *ocotp_base;
+ struct clk *clk;
+};
#ifdef CONFIG_HAVE_ARM_SMCCC
static u32 imx8mq_soc_revision_from_atf(void)
@@ -51,27 +57,24 @@ static u32 imx8mq_soc_revision_from_atf(void)
static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
#endif
-static u32 __init imx8mq_soc_revision(void)
+static int imx8m_soc_uid(struct platform_device *pdev, u64 *socuid)
{
- struct device_node *np;
- void __iomem *ocotp_base;
- u32 magic;
- u32 rev;
- struct clk *clk;
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
- if (!np)
- return 0;
+ *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ *socuid <<= 32;
+ *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
- ocotp_base = of_iomap(np, 0);
- WARN_ON(!ocotp_base);
- clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(clk)) {
- WARN_ON(IS_ERR(clk));
- return 0;
- }
+ return 0;
+}
- clk_prepare_enable(clk);
+static int imx8mq_soc_revision(struct platform_device *pdev, u32 *socrev)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
+ u32 magic;
+ u32 rev;
/*
* SOC revision on older imx8mq is not available in fuses so query
@@ -84,81 +87,109 @@ static u32 __init imx8mq_soc_revision(void)
rev = REV_B1;
}
- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
- soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
-
- clk_disable_unprepare(clk);
- clk_put(clk);
- iounmap(ocotp_base);
- of_node_put(np);
+ *socrev = rev;
- return rev;
+ return 0;
}
-static void __init imx8mm_soc_uid(void)
+static int imx8mp_soc_uid(struct platform_device *pdev, u64 *socuid)
{
- void __iomem *ocotp_base;
- struct device_node *np;
- u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
- IMX8MP_OCOTP_UID_OFFSET : 0;
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
- if (!np)
- return;
+ socuid[0] = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + IMX8MP_OCOTP_UID_OFFSET);
+ socuid[0] <<= 32;
+ socuid[0] |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + IMX8MP_OCOTP_UID_OFFSET);
- ocotp_base = of_iomap(np, 0);
- WARN_ON(!ocotp_base);
+ socuid[1] = readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH + 0x10);
+ socuid[1] <<= 32;
+ socuid[1] |= readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH);
- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
- soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
-
- iounmap(ocotp_base);
- of_node_put(np);
+ return 0;
}
-static u32 __init imx8mm_soc_revision(void)
+static int imx8mm_soc_revision(struct platform_device *pdev, u32 *socrev)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
void __iomem *anatop_base;
- u32 rev;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
if (!np)
- return 0;
+ return -EINVAL;
anatop_base = of_iomap(np, 0);
- WARN_ON(!anatop_base);
+ if (!anatop_base)
+ return -EINVAL;
- rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+ *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
iounmap(anatop_base);
- of_node_put(np);
- imx8mm_soc_uid();
+ return 0;
+}
+
+static int imx8m_soc_prepare(struct platform_device *pdev, const char *ocotp_compatible)
+{
+ struct device_node *np __free(device_node) =
+ of_find_compatible_node(NULL, NULL, ocotp_compatible);
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (!np)
+ return -EINVAL;
+
+ drvdata->ocotp_base = of_iomap(np, 0);
+ if (!drvdata->ocotp_base)
+ return -EINVAL;
+
+ drvdata->clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk;
+ }
+
+ return clk_prepare_enable(drvdata->clk);
+
+err_clk:
+ iounmap(drvdata->ocotp_base);
+ return ret;
+}
+
+static void imx8m_soc_unprepare(struct platform_device *pdev)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
- return rev;
+ clk_disable_unprepare(drvdata->clk);
+ clk_put(drvdata->clk);
+ iounmap(drvdata->ocotp_base);
}
static const struct imx8_soc_data imx8mq_soc_data = {
.name = "i.MX8MQ",
+ .ocotp_compatible = "fsl,imx8mq-ocotp",
.soc_revision = imx8mq_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mm_soc_data = {
.name = "i.MX8MM",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mn_soc_data = {
.name = "i.MX8MN",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mp_soc_data = {
.name = "i.MX8MP",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8mp_soc_uid,
};
static __maybe_unused const struct of_device_id imx8_soc_match[] = {
@@ -169,77 +200,146 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ }
};
-#define imx8_revision(soc_rev) \
- soc_rev ? \
- kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
+#define imx8_revision(dev, soc_rev) \
+ (soc_rev) ? \
+ devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \
"unknown"
-static int __init imx8_soc_init(void)
+static void imx8m_unregister_soc(void *data)
+{
+ soc_device_unregister(data);
+}
+
+static void imx8m_unregister_cpufreq(void *data)
+{
+ platform_device_unregister(data);
+}
+
+static int imx8m_soc_probe(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
- struct soc_device *soc_dev;
+ struct platform_device *cpufreq_dev;
+ const struct imx8_soc_data *data;
+ struct imx8_soc_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
const struct of_device_id *id;
+ struct soc_device *soc_dev;
u32 soc_rev = 0;
- const struct imx8_soc_data *data;
+ u64 soc_uid[2] = {0, 0};
int ret;
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, drvdata);
+
soc_dev_attr->family = "Freescale i.MX";
ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
if (ret)
- goto free_soc;
+ return ret;
id = of_match_node(imx8_soc_match, of_root);
- if (!id) {
- ret = -ENODEV;
- goto free_soc;
- }
+ if (!id)
+ return -ENODEV;
data = id->data;
if (data) {
soc_dev_attr->soc_id = data->name;
- if (data->soc_revision)
- soc_rev = data->soc_revision();
+ ret = imx8m_soc_prepare(pdev, data->ocotp_compatible);
+ if (ret)
+ return ret;
+
+ if (data->soc_revision) {
+ ret = data->soc_revision(pdev, &soc_rev);
+ if (ret) {
+ imx8m_soc_unprepare(pdev);
+ return ret;
+ }
+ }
+ if (data->soc_uid) {
+ ret = data->soc_uid(pdev, soc_uid);
+ if (ret) {
+ imx8m_soc_unprepare(pdev);
+ return ret;
+ }
+ }
+ imx8m_soc_unprepare(pdev);
}
- soc_dev_attr->revision = imx8_revision(soc_rev);
- if (!soc_dev_attr->revision) {
- ret = -ENOMEM;
- goto free_soc;
- }
+ soc_dev_attr->revision = imx8_revision(dev, soc_rev);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
- soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
- if (!soc_dev_attr->serial_number) {
- ret = -ENOMEM;
- goto free_rev;
- }
+ if (soc_uid[1])
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX%016llX",
+ soc_uid[1], soc_uid[0]);
+ else
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX",
+ soc_uid[0]);
+ if (!soc_dev_attr->serial_number)
+ return -ENOMEM;
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- ret = PTR_ERR(soc_dev);
- goto free_serial_number;
- }
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev);
+ if (ret)
+ return ret;
pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
soc_dev_attr->revision);
- if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
- platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) {
+ cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ERR(cpufreq_dev))
+ return dev_err_probe(dev, PTR_ERR(cpufreq_dev),
+ "Failed to register imx-cpufreq-dev device\n");
+ ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev);
+ if (ret)
+ return ret;
+ }
return 0;
+}
-free_serial_number:
- kfree(soc_dev_attr->serial_number);
-free_rev:
- if (strcmp(soc_dev_attr->revision, "unknown"))
- kfree(soc_dev_attr->revision);
-free_soc:
- kfree(soc_dev_attr);
- return ret;
+static struct platform_driver imx8m_soc_driver = {
+ .probe = imx8m_soc_probe,
+ .driver = {
+ .name = "imx8m-soc",
+ },
+};
+
+static int __init imx8_soc_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ /* No match means this is non-i.MX8M hardware, do nothing. */
+ if (!of_match_node(imx8_soc_match, of_root))
+ return 0;
+
+ ret = platform_driver_register(&imx8m_soc_driver);
+ if (ret) {
+ pr_err("Failed to register imx8m-soc platform driver: %d\n", ret);
+ return ret;
+ }
+
+ pdev = platform_device_register_simple("imx8m-soc", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("Failed to register imx8m-soc platform device: %ld\n", PTR_ERR(pdev));
+ platform_driver_unregister(&imx8m_soc_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
}
device_initcall(imx8_soc_init);
+MODULE_DESCRIPTION("NXP i.MX8M SoC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/soc-imx9.c b/drivers/soc/imx/soc-imx9.c
new file mode 100644
index 000000000000..b46d22cf0212
--- /dev/null
+++ b/drivers/soc/imx/soc-imx9.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define IMX_SIP_GET_SOC_INFO 0xc2000006
+#define SOC_ID(x) (((x) & 0xFFFF) >> 8)
+#define SOC_REV_MAJOR(x) ((((x) >> 28) & 0xF) - 0x9)
+#define SOC_REV_MINOR(x) (((x) >> 24) & 0xF)
+
+static int imx9_soc_probe(struct platform_device *pdev)
+{
+ struct soc_device_attribute *attr;
+ struct arm_smccc_res res;
+ struct soc_device *sdev;
+ u32 soc_id, rev_major, rev_minor;
+ u64 uid127_64, uid63_0;
+ int err;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ err = of_property_read_string(of_root, "model", &attr->machine);
+ if (err) {
+ pr_err("%s: missing model property: %d\n", __func__, err);
+ goto attr;
+ }
+
+ attr->family = kasprintf(GFP_KERNEL, "Freescale i.MX");
+
+ /*
+ * Retrieve the soc id, rev & uid info:
+ * res.a1[31:16]: soc revision;
+ * res.a1[15:0]: soc id;
+ * res.a2: uid[127:64];
+ * res.a3: uid[63:0];
+ */
+ arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_err("%s: SMC failed: 0x%lx\n", __func__, res.a0);
+ err = -EINVAL;
+ goto family;
+ }
+
+ soc_id = SOC_ID(res.a1);
+ rev_major = SOC_REV_MAJOR(res.a1);
+ rev_minor = SOC_REV_MINOR(res.a1);
+
+ attr->soc_id = kasprintf(GFP_KERNEL, "i.MX%2x", soc_id);
+ attr->revision = kasprintf(GFP_KERNEL, "%d.%d", rev_major, rev_minor);
+
+ uid127_64 = res.a2;
+ uid63_0 = res.a3;
+ attr->serial_number = kasprintf(GFP_KERNEL, "%016llx%016llx", uid127_64, uid63_0);
+
+ sdev = soc_device_register(attr);
+ if (IS_ERR(sdev)) {
+ err = PTR_ERR(sdev);
+ pr_err("%s failed to register SoC as a device: %d\n", __func__, err);
+ goto serial_number;
+ }
+
+ return 0;
+
+serial_number:
+ kfree(attr->serial_number);
+ kfree(attr->revision);
+ kfree(attr->soc_id);
+family:
+ kfree(attr->family);
+attr:
+ kfree(attr);
+ return err;
+}
+
+static __maybe_unused const struct of_device_id imx9_soc_match[] = {
+ { .compatible = "fsl,imx93", },
+ { .compatible = "fsl,imx95", },
+ { }
+};
+
+#define IMX_SOC_DRIVER "imx9-soc"
+
+static struct platform_driver imx9_soc_driver = {
+ .probe = imx9_soc_probe,
+ .driver = {
+ .name = IMX_SOC_DRIVER,
+ },
+};
+
+static int __init imx9_soc_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ /* No match means it is not an i.MX 9 series SoC, do nothing. */
+ if (!of_match_node(imx9_soc_match, of_root))
+ return 0;
+
+ ret = platform_driver_register(&imx9_soc_driver);
+ if (ret) {
+ pr_err("failed to register imx9_soc platform driver: %d\n", ret);
+ return ret;
+ }
+
+ pdev = platform_device_register_simple(IMX_SOC_DRIVER, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("failed to register imx9_soc platform device: %ld\n", PTR_ERR(pdev));
+ platform_driver_unregister(&imx9_soc_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(imx9_soc_init);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP i.MX9 SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index 5be9988f30ce..33e2e0366f19 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -736,7 +736,7 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
return 0;
}
-static int ixp4xx_npe_remove(struct platform_device *pdev)
+static void ixp4xx_npe_remove(struct platform_device *pdev)
{
int i;
@@ -744,8 +744,6 @@ static int ixp4xx_npe_remove(struct platform_device *pdev)
if (npe_tab[i].regs) {
npe_reset(&npe_tab[i]);
}
-
- return 0;
}
static const struct of_device_id ixp4xx_npe_of_match[] = {
@@ -766,6 +764,7 @@ static struct platform_driver ixp4xx_npe_driver = {
module_platform_driver(ixp4xx_npe_driver);
MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx Network Processor Engine driver");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(NPE_A_FIRMWARE);
MODULE_FIRMWARE(NPE_B_FIRMWARE);
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
index 291086bb9313..475e229039e3 100644
--- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -442,11 +442,10 @@ static int ixp4xx_qmgr_probe(struct platform_device *pdev)
return 0;
}
-static int ixp4xx_qmgr_remove(struct platform_device *pdev)
+static void ixp4xx_qmgr_remove(struct platform_device *pdev)
{
synchronize_irq(qmgr_irq_1);
synchronize_irq(qmgr_irq_2);
- return 0;
}
static const struct of_device_id ixp4xx_qmgr_of_match[] = {
@@ -466,6 +465,7 @@ static struct platform_driver ixp4xx_qmgr_driver = {
};
module_platform_driver(ixp4xx_qmgr_driver);
+MODULE_DESCRIPTION("Intel IXP4xx Queue Manager driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");
diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig
index e6ba3573a772..f3f869639588 100644
--- a/drivers/soc/litex/Kconfig
+++ b/drivers/soc/litex/Kconfig
@@ -7,7 +7,7 @@ config LITEX
config LITEX_SOC_CONTROLLER
tristate "Enable LiteX SoC Controller driver"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select LITEX
help
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index f75790091d38..104a5f9bfd26 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -69,26 +69,21 @@ static int litex_check_csr_access(void __iomem *reg_addr)
struct litex_soc_ctrl_device {
void __iomem *base;
- struct notifier_block reset_nb;
};
-static int litex_reset_handler(struct notifier_block *this, unsigned long mode,
- void *cmd)
+static int litex_reset_handler(struct sys_off_data *data)
{
- struct litex_soc_ctrl_device *soc_ctrl_dev =
- container_of(this, struct litex_soc_ctrl_device, reset_nb);
+ struct litex_soc_ctrl_device *soc_ctrl_dev = data->cb_data;
litex_write32(soc_ctrl_dev->base + RESET_REG_OFF, RESET_REG_VALUE);
return NOTIFY_DONE;
}
-#ifdef CONFIG_OF
static const struct of_device_id litex_soc_ctrl_of_match[] = {
{.compatible = "litex,soc-controller"},
{},
};
MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
-#endif /* CONFIG_OF */
static int litex_soc_ctrl_probe(struct platform_device *pdev)
{
@@ -107,11 +102,9 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, soc_ctrl_dev);
-
- soc_ctrl_dev->reset_nb.notifier_call = litex_reset_handler;
- soc_ctrl_dev->reset_nb.priority = 128;
- error = register_restart_handler(&soc_ctrl_dev->reset_nb);
+ error = devm_register_restart_handler(&pdev->dev,
+ litex_reset_handler,
+ soc_ctrl_dev);
if (error) {
dev_warn(&pdev->dev, "cannot register restart handler: %d\n",
error);
@@ -120,21 +113,12 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
return 0;
}
-static int litex_soc_ctrl_remove(struct platform_device *pdev)
-{
- struct litex_soc_ctrl_device *soc_ctrl_dev = platform_get_drvdata(pdev);
-
- unregister_restart_handler(&soc_ctrl_dev->reset_nb);
- return 0;
-}
-
static struct platform_driver litex_soc_ctrl_driver = {
.driver = {
.name = "litex-soc-controller",
- .of_match_table = of_match_ptr(litex_soc_ctrl_of_match)
+ .of_match_table = litex_soc_ctrl_of_match,
},
.probe = litex_soc_ctrl_probe,
- .remove = litex_soc_ctrl_remove,
};
module_platform_driver(litex_soc_ctrl_driver);
diff --git a/drivers/soc/loongson/Kconfig b/drivers/soc/loongson/Kconfig
index 314e13bb3e01..368344943a93 100644
--- a/drivers/soc/loongson/Kconfig
+++ b/drivers/soc/loongson/Kconfig
@@ -20,6 +20,7 @@ config LOONGSON2_GUTS
config LOONGSON2_PM
bool "Loongson-2 SoC Power Management Controller Driver"
depends on LOONGARCH && OF
+ depends on INPUT=y
help
The Loongson-2's power management controller was ACPI, supports ACPI
S2Idle (Suspend To Idle), ACPI S3 (Suspend To RAM), ACPI S4 (Suspend To
diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
index bace4bc8e03b..16913c3ef65c 100644
--- a/drivers/soc/loongson/loongson2_guts.c
+++ b/drivers/soc/loongson/loongson2_guts.c
@@ -70,7 +70,7 @@ static const struct loongson2_soc_die_attr *loongson2_soc_die_match(
if (matches->svr == (svr & matches->mask))
return matches;
matches++;
- };
+ }
return NULL;
}
@@ -94,7 +94,6 @@ static int loongson2_guts_probe(struct platform_device *pdev)
{
struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct resource *res;
const struct loongson2_soc_die_attr *soc_die;
const char *machine;
u32 svr;
@@ -106,8 +105,7 @@ static int loongson2_guts_probe(struct platform_device *pdev)
guts->little_endian = of_property_read_bool(np, "little-endian");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- guts->regs = ioremap(res->start, res->end - res->start + 1);
+ guts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(guts->regs))
return PTR_ERR(guts->regs);
@@ -116,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev)
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
of_node_put(root);
- if (machine)
+ if (machine) {
soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine)
+ return -ENOMEM;
+ }
svr = loongson2_guts_get_svr();
soc_die = loongson2_soc_die_match(svr, loongson2_soc_die);
@@ -150,11 +151,9 @@ static int loongson2_guts_probe(struct platform_device *pdev)
return 0;
}
-static int loongson2_guts_remove(struct platform_device *dev)
+static void loongson2_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
-
- return 0;
}
/*
diff --git a/drivers/soc/loongson/loongson2_pm.c b/drivers/soc/loongson/loongson2_pm.c
index 796add6e8b63..b8e5e1e3528a 100644
--- a/drivers/soc/loongson/loongson2_pm.c
+++ b/drivers/soc/loongson/loongson2_pm.c
@@ -11,6 +11,7 @@
#include <linux/input.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
+#include <linux/of_platform.h>
#include <linux/pm_wakeirq.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
@@ -192,12 +193,16 @@ static int loongson2_pm_probe(struct platform_device *pdev)
if (loongson_sysconf.suspend_addr)
suspend_set_ops(&loongson2_suspend_ops);
+ /* Populate children */
+ retval = devm_of_platform_populate(dev);
+ if (retval)
+ dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
+
return 0;
}
static const struct of_device_id loongson2_pm_match[] = {
{ .compatible = "loongson,ls2k0500-pmc", },
- { .compatible = "loongson,ls2k1000-pmc", },
{},
};
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index a88cf04fc803..d7293977f06e 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -26,6 +26,17 @@ config MTK_DEVAPC
The violation information is logged for further analysis or
countermeasures.
+config MTK_DVFSRC
+ tristate "MediaTek DVFSRC Support"
+ depends on ARCH_MEDIATEK
+ help
+ Say yes here to add support for the MediaTek Dynamic Voltage
+ and Frequency Scaling Resource Collector (DVFSRC): a HW
+ IP found on many MediaTek SoCs, which is responsible for
+ collecting DVFS requests from various SoC IPs as well as from
+ software, and performing bandwidth scaling to provide the
+ best achievable performance-per-watt.
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
select REGMAP
@@ -49,29 +60,6 @@ config MTK_REGULATOR_COUPLER
default ARCH_MEDIATEK
depends on REGULATOR
-config MTK_SCPSYS
- bool "MediaTek SCPSYS Support"
- default ARCH_MEDIATEK
- depends on OF
- select REGMAP
- select MTK_INFRACFG
- select PM_GENERIC_DOMAINS if PM
- help
- Say yes here to add support for the MediaTek SCPSYS power domain
- driver.
-
-config MTK_SCPSYS_PM_DOMAINS
- bool "MediaTek SCPSYS generic power domain"
- default ARCH_MEDIATEK
- depends on PM
- select PM_GENERIC_DOMAINS
- select REGMAP
- help
- Say y here to enable power domain support.
- In order to meet high performance and low power requirements, the System
- Control Processor System (SCPSYS) has several power management related
- tasks in the system.
-
config MTK_MMSYS
tristate "MediaTek MMSYS Support"
default ARCH_MEDIATEK
@@ -91,4 +79,14 @@ config MTK_SVS
chip process corner, temperatures and other factors. Then DVFS
driver could apply SVS bank voltage to PMIC/Buck.
+config MTK_SOCINFO
+ tristate "MediaTek SoC Information"
+ default y
+ depends on NVMEM_MTK_EFUSE
+ select SOC_BUS
+ help
+ The MediaTek SoC Information (mtk-socinfo) driver provides
+ information about the SoC to userspace, including the
+ manufacturer name, marketing name and SoC name.
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 9d3ce7878c5c..0665573e3c4b 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o
+obj-$(CONFIG_MTK_DVFSRC) += mtk-dvfsrc.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
obj-$(CONFIG_MTK_SVS) += mtk-svs.o
+obj-$(CONFIG_MTK_SOCINFO) += mtk-socinfo.o
diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h
index f7a35b3656bb..c468926561b4 100644
--- a/drivers/soc/mediatek/mt8167-mmsys.h
+++ b/drivers/soc/mediatek/mt8167-mmsys.h
@@ -14,22 +14,21 @@
#define MT8167_DSI0_SEL_IN_RDMA0 0x1
static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0,
- MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0
- },
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+ OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(DITHER0, RDMA0,
+ MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0,
+ MT8167_DITHER_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
+ COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0,
+ MT8167_DSI0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0,
+ MT8167_RDMA0_SOUT_DSI0),
};
#endif /* __SOC_MEDIATEK_MT8167_MMSYS_H */
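The table above (and the MT8173/MT8183 tables converted below) now uses an MMSYS_ROUTE() helper whose definition lives in mtk-mmsys.h and is not part of this diff. Given the positional layout of the removed entries (source component, target component, register, mask, value), a plausible shape is sketched here as an assumption, not the actual definition:

/* Hypothetical sketch only; the real helper is defined in mtk-mmsys.h. */
#define MMSYS_ROUTE(from, to, reg, mask, value)				\
	{ DDP_COMPONENT_##from, DDP_COMPONENT_##to, (reg), (mask), (value) }

/*
 * Under that assumption the first MT8167 entry expands back to roughly the
 * removed open-coded form:
 * { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
 *   MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
 *   OVL0_MOUT_EN_COLOR0, OVL0_MOUT_EN_COLOR0 }
 */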
diff --git a/drivers/soc/mediatek/mt8173-mmsys.h b/drivers/soc/mediatek/mt8173-mmsys.h
index 9d24e381271e..957876d7c166 100644
--- a/drivers/soc/mediatek/mt8173-mmsys.h
+++ b/drivers/soc/mediatek/mt8173-mmsys.h
@@ -33,63 +33,48 @@
#define MT8173_RDMA0_SOUT_COLOR0 BIT(0)
static const struct mtk_mmsys_routes mt8173_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8173_OVL0_MOUT_EN_COLOR0, MT8173_OVL0_MOUT_EN_COLOR0
- }, {
- DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
- MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN,
- MT8173_OD0_MOUT_EN_RDMA0, MT8173_OD0_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN,
- MT8173_UFOE_MOUT_EN_DSI0, MT8173_UFOE_MOUT_EN_DSI0
- }, {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_AAL0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN,
- MT8173_COLOR0_SOUT_MERGE, 0 /* SOUT to AAL */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN,
- MT8173_RDMA0_SOUT_COLOR0, 0 /* SOUT to UFOE */
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8173_COLOR0_SEL_IN_OVL0, MT8173_COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_AAL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN,
- MT8173_AAL_SEL_IN_MERGE, 0 /* SEL_IN from COLOR0 */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN,
- MT8173_UFOE_SEL_IN_RDMA0, 0 /* SEL_IN from RDMA0 */
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DSI0_SEL_IN,
- MT8173_DSI0_SEL_IN_UFOE, 0, /* SEL_IN from UFOE */
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN,
- MT8173_OVL1_MOUT_EN_COLOR1, MT8173_OVL1_MOUT_EN_COLOR1
- }, {
- DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
- MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN,
- MT8173_GAMMA_MOUT_EN_RDMA1, MT8173_GAMMA_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN,
- RDMA1_SOUT_MASK, RDMA1_SOUT_DPI0
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN,
- COLOR1_SEL_IN_OVL1, COLOR1_SEL_IN_OVL1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DPI_SEL_IN,
- MT8173_DPI0_SEL_IN_MASK, MT8173_DPI0_SEL_IN_RDMA1
- }
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, MT8173_OVL0_MOUT_EN_COLOR0,
+ MT8173_OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(OD0, RDMA0,
+ MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN, MT8173_OD0_MOUT_EN_RDMA0,
+ MT8173_OD0_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, MT8173_UFOE_MOUT_EN_DSI0,
+ MT8173_UFOE_MOUT_EN_DSI0),
+ MMSYS_ROUTE(COLOR0, AAL0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN, MT8173_COLOR0_SOUT_MERGE,
+ 0 /* SOUT to AAL */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8173_RDMA0_SOUT_COLOR0,
+ 0 /* SOUT to UFOE */),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, MT8173_COLOR0_SEL_IN_OVL0,
+ MT8173_COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(AAL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN, MT8173_AAL_SEL_IN_MERGE,
+ 0 /* SEL_IN from COLOR0 */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN, MT8173_UFOE_SEL_IN_RDMA0,
+ 0 /* SEL_IN from RDMA0 */),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DSI0_SEL_IN, MT8173_DSI0_SEL_IN_UFOE,
+ 0 /* SEL_IN from UFOE */),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, MT8173_OVL1_MOUT_EN_COLOR1,
+ MT8173_OVL1_MOUT_EN_COLOR1),
+ MMSYS_ROUTE(GAMMA, RDMA1,
+ MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, MT8173_GAMMA_MOUT_EN_RDMA1,
+ MT8173_GAMMA_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DPI0),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
+ COLOR1_SEL_IN_OVL1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DPI_SEL_IN, MT8173_DPI0_SEL_IN_MASK,
+ MT8173_DPI0_SEL_IN_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8173_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
index ff6be1703469..123384958c4b 100644
--- a/drivers/soc/mediatek/mt8183-mmsys.h
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -28,35 +28,27 @@
#define MT8183_MMSYS_SW0_RST_B 0x140
static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
- MT8183_OVL0_MOUT_EN_OVL0_2L
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
- MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
- }, {
- DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
- MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
- MT8183_OVL1_2L_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
- MT8183_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
- MT8183_DISP_PATH0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
- MT8183_DPI0_SEL_IN_RDMA1
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
- MT8183_RDMA0_SOUT_COLOR0
- }
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
+ MT8183_OVL0_MOUT_EN_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
+ MT8183_OVL0_2L_MOUT_EN_DISP_PATH0),
+ MMSYS_ROUTE(OVL_2L1, RDMA1,
+ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
+ MT8183_OVL1_2L_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
+ MT8183_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
+ MT8183_DISP_PATH0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
+ MT8183_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
+ MT8183_RDMA0_SOUT_COLOR0),
};
#endif /* __SOC_MEDIATEK_MT8183_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
index 279d4138525b..354664be72bd 100644
--- a/drivers/soc/mediatek/mt8186-mmsys.h
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -63,61 +63,39 @@
#define MT8186_MMSYS_SW0_RST_B 0x160
static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
- MT8186_OVL0_MOUT_TO_RDMA0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
- MT8186_RDMA0_FROM_OVL0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
- MT8186_OVL0_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
- MT8186_RDMA0_SOUT_TO_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
- MT8186_DITHER0_MOUT_TO_DSI0,
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
- MT8186_DSI0_FROM_DITHER0
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
- MT8186_OVL0_2L_MOUT_TO_RDMA1
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
- MT8186_RDMA1_FROM_OVL0_2L
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
- MT8186_OVL0_2L_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
- MT8186_RDMA1_MOUT_TO_DPI0_SEL
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
- MT8186_DPI0_FROM_RDMA1
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
+ MT8186_OVL0_MOUT_TO_RDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
+ MT8186_RDMA0_FROM_OVL0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
+ MT8186_OVL0_GO_BLEND),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
+ MT8186_RDMA0_SOUT_TO_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
+ MT8186_DITHER0_MOUT_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
+ MT8186_DSI0_FROM_DITHER0),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
+ MT8186_OVL0_2L_MOUT_TO_RDMA1),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
+ MT8186_RDMA1_FROM_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
+ MT8186_OVL0_2L_GO_BLEND),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
+ MT8186_RDMA1_MOUT_TO_DPI0_SEL),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
+ MT8186_DPI0_FROM_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h
index 448cc3761b43..99080afead7e 100644
--- a/drivers/soc/mediatek/mt8188-mmsys.h
+++ b/drivers/soc/mediatek/mt8188-mmsys.h
@@ -3,6 +3,10 @@
#ifndef __SOC_MEDIATEK_MT8188_MMSYS_H
#define __SOC_MEDIATEK_MT8188_MMSYS_H
+#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <dt-bindings/reset/mt8188-resets.h>
+
+#define MT8188_VDO0_SW0_RST_B 0x190
#define MT8188_VDO0_OVL_MOUT_EN 0xf14
#define MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0)
#define MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1)
@@ -67,83 +71,257 @@
#define MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE BIT(18)
#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DISP_WDMA0 BIT(19)
+#define MT8188_VDO1_SW0_RST_B 0x1d0
+#define MT8188_VDO1_HDR_TOP_CFG 0xd00
+#define MT8188_VDO1_MIXER_IN1_ALPHA 0xd30
+#define MT8188_VDO1_MIXER_IN1_PAD 0xd40
+#define MT8188_VDO1_MIXER_VSYNC_LEN 0xd5c
+#define MT8188_VDO1_MERGE0_ASYNC_CFG_WD 0xe30
+#define MT8188_VDO1_HDRBE_ASYNC_CFG_WD 0xe70
+#define MT8188_VDO1_VPP_MERGE0_P0_SEL_IN 0xf04
+#define MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 1
+#define MT8188_VDO1_VPP_MERGE0_P1_SEL_IN 0xf08
+#define MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 1
+#define MT8188_VDO1_DISP_DPI1_SEL_IN 0xf10
+#define MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+#define MT8188_VDO1_DISP_DP_INTF0_SEL_IN 0xf14
+#define MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+#define MT8188_VDO1_MERGE4_SOUT_SEL 0xf18
+#define MT8188_MERGE4_SOUT_TO_DPI1_SEL BIT(2)
+#define MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL BIT(3)
+#define MT8188_VDO1_MIXER_IN1_SEL_IN 0xf24
+#define MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_IN2_SEL_IN 0xf28
+#define MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_IN3_SEL_IN 0xf2c
+#define MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_IN4_SEL_IN 0xf30
+#define MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_OUT_SOUT_SEL 0xf34
+#define MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL 1
+#define MT8188_VDO1_VPP_MERGE1_P0_SEL_IN 0xf3c
+#define MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 1
+#define MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL 0xf40
+#define MT8188_SOUT_TO_MIXER_IN1_SEL 1
+#define MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL 0xf44
+#define MT8188_SOUT_TO_MIXER_IN2_SEL 1
+#define MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL 0xf48
+#define MT8188_SOUT_TO_MIXER_IN3_SEL 1
+#define MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL 0xf4c
+#define MT8188_SOUT_TO_MIXER_IN4_SEL 1
+#define MT8188_VDO1_MERGE4_ASYNC_SEL_IN 0xf50
+#define MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT 1
+#define MT8188_VDO1_MIXER_IN1_SOUT_SEL 0xf58
+#define MT8188_MIXER_IN1_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_IN2_SOUT_SEL 0xf5c
+#define MT8188_MIXER_IN2_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_IN3_SOUT_SEL 0xf60
+#define MT8188_MIXER_IN3_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_IN4_SOUT_SEL 0xf64
+#define MT8188_MIXER_IN4_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_SOUT_SEL_IN 0xf68
+#define MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER 0
+
+static const u8 mmsys_mt8188_vdo0_rst_tb[] = {
+ [MT8188_VDO0_RST_DISP_OVL0] = MMSYS_RST_NR(0, 0),
+ [MT8188_VDO0_RST_FAKE_ENG0] = MMSYS_RST_NR(0, 2),
+ [MT8188_VDO0_RST_DISP_CCORR0] = MMSYS_RST_NR(0, 4),
+ [MT8188_VDO0_RST_DISP_MUTEX0] = MMSYS_RST_NR(0, 6),
+ [MT8188_VDO0_RST_DISP_GAMMA0] = MMSYS_RST_NR(0, 8),
+ [MT8188_VDO0_RST_DISP_DITHER0] = MMSYS_RST_NR(0, 10),
+ [MT8188_VDO0_RST_DISP_WDMA0] = MMSYS_RST_NR(0, 17),
+ [MT8188_VDO0_RST_DISP_RDMA0] = MMSYS_RST_NR(0, 19),
+ [MT8188_VDO0_RST_DSI0] = MMSYS_RST_NR(0, 21),
+ [MT8188_VDO0_RST_DSI1] = MMSYS_RST_NR(0, 22),
+ [MT8188_VDO0_RST_DSC_WRAP0] = MMSYS_RST_NR(0, 23),
+ [MT8188_VDO0_RST_VPP_MERGE0] = MMSYS_RST_NR(0, 24),
+ [MT8188_VDO0_RST_DP_INTF0] = MMSYS_RST_NR(0, 25),
+ [MT8188_VDO0_RST_DISP_AAL0] = MMSYS_RST_NR(0, 26),
+ [MT8188_VDO0_RST_INLINEROT0] = MMSYS_RST_NR(0, 27),
+ [MT8188_VDO0_RST_APB_BUS] = MMSYS_RST_NR(0, 28),
+ [MT8188_VDO0_RST_DISP_COLOR0] = MMSYS_RST_NR(0, 29),
+ [MT8188_VDO0_RST_MDP_WROT0] = MMSYS_RST_NR(0, 30),
+ [MT8188_VDO0_RST_DISP_RSZ0] = MMSYS_RST_NR(0, 31),
+};
+
+static const u8 mmsys_mt8188_vdo1_rst_tb[] = {
+ [MT8188_VDO1_RST_SMI_LARB2] = MMSYS_RST_NR(0, 0),
+ [MT8188_VDO1_RST_SMI_LARB3] = MMSYS_RST_NR(0, 1),
+ [MT8188_VDO1_RST_GALS] = MMSYS_RST_NR(0, 2),
+ [MT8188_VDO1_RST_FAKE_ENG0] = MMSYS_RST_NR(0, 3),
+ [MT8188_VDO1_RST_FAKE_ENG1] = MMSYS_RST_NR(0, 4),
+ [MT8188_VDO1_RST_MDP_RDMA0] = MMSYS_RST_NR(0, 5),
+ [MT8188_VDO1_RST_MDP_RDMA1] = MMSYS_RST_NR(0, 6),
+ [MT8188_VDO1_RST_MDP_RDMA2] = MMSYS_RST_NR(0, 7),
+ [MT8188_VDO1_RST_MDP_RDMA3] = MMSYS_RST_NR(0, 8),
+ [MT8188_VDO1_RST_VPP_MERGE0] = MMSYS_RST_NR(0, 9),
+ [MT8188_VDO1_RST_VPP_MERGE1] = MMSYS_RST_NR(0, 10),
+ [MT8188_VDO1_RST_VPP_MERGE2] = MMSYS_RST_NR(0, 11),
+ [MT8188_VDO1_RST_VPP_MERGE3] = MMSYS_RST_NR(1, 0),
+ [MT8188_VDO1_RST_VPP_MERGE4] = MMSYS_RST_NR(1, 1),
+ [MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 2),
+ [MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 3),
+ [MT8188_VDO1_RST_DISP_MUTEX] = MMSYS_RST_NR(1, 4),
+ [MT8188_VDO1_RST_MDP_RDMA4] = MMSYS_RST_NR(1, 5),
+ [MT8188_VDO1_RST_MDP_RDMA5] = MMSYS_RST_NR(1, 6),
+ [MT8188_VDO1_RST_MDP_RDMA6] = MMSYS_RST_NR(1, 7),
+ [MT8188_VDO1_RST_MDP_RDMA7] = MMSYS_RST_NR(1, 8),
+ [MT8188_VDO1_RST_DP_INTF1_MMCK] = MMSYS_RST_NR(1, 9),
+ [MT8188_VDO1_RST_DPI0_MM_CK] = MMSYS_RST_NR(1, 10),
+ [MT8188_VDO1_RST_DPI1_MM_CK] = MMSYS_RST_NR(1, 11),
+ [MT8188_VDO1_RST_MERGE0_DL_ASYNC] = MMSYS_RST_NR(1, 13),
+ [MT8188_VDO1_RST_MERGE1_DL_ASYNC] = MMSYS_RST_NR(1, 14),
+ [MT8188_VDO1_RST_MERGE2_DL_ASYNC] = MMSYS_RST_NR(1, 15),
+ [MT8188_VDO1_RST_MERGE3_DL_ASYNC] = MMSYS_RST_NR(1, 16),
+ [MT8188_VDO1_RST_MERGE4_DL_ASYNC] = MMSYS_RST_NR(1, 17),
+ [MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 18),
+ [MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 19),
+ [MT8188_VDO1_RST_PADDING0] = MMSYS_RST_NR(1, 20),
+ [MT8188_VDO1_RST_PADDING1] = MMSYS_RST_NR(1, 21),
+ [MT8188_VDO1_RST_PADDING2] = MMSYS_RST_NR(1, 22),
+ [MT8188_VDO1_RST_PADDING3] = MMSYS_RST_NR(1, 23),
+ [MT8188_VDO1_RST_PADDING4] = MMSYS_RST_NR(1, 24),
+ [MT8188_VDO1_RST_PADDING5] = MMSYS_RST_NR(1, 25),
+ [MT8188_VDO1_RST_PADDING6] = MMSYS_RST_NR(1, 26),
+ [MT8188_VDO1_RST_PADDING7] = MMSYS_RST_NR(1, 27),
+ [MT8188_VDO1_RST_DISP_RSZ0] = MMSYS_RST_NR(1, 28),
+ [MT8188_VDO1_RST_DISP_RSZ1] = MMSYS_RST_NR(1, 29),
+ [MT8188_VDO1_RST_DISP_RSZ2] = MMSYS_RST_NR(1, 30),
+ [MT8188_VDO1_RST_DISP_RSZ3] = MMSYS_RST_NR(1, 31),
+ [MT8188_VDO1_RST_HDR_VDO_FE0] = MMSYS_RST_NR(2, 0),
+ [MT8188_VDO1_RST_HDR_GFX_FE0] = MMSYS_RST_NR(2, 1),
+ [MT8188_VDO1_RST_HDR_VDO_BE] = MMSYS_RST_NR(2, 2),
+ [MT8188_VDO1_RST_HDR_VDO_FE1] = MMSYS_RST_NR(2, 16),
+ [MT8188_VDO1_RST_HDR_GFX_FE1] = MMSYS_RST_NR(2, 17),
+ [MT8188_VDO1_RST_DISP_MIXER] = MMSYS_RST_NR(2, 18),
+ [MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC] = MMSYS_RST_NR(2, 19),
+ [MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC] = MMSYS_RST_NR(2, 20),
+ [MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC] = MMSYS_RST_NR(2, 21),
+ [MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC] = MMSYS_RST_NR(2, 22),
+ [MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC] = MMSYS_RST_NR(2, 23),
+};
+
static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
- MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_DSC_WARP_SEL,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
- MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK,
- MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
+ MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8188_VDO0_DISP_RDMA_SEL, GENMASK(1, 0),
+ MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
+};
+
+static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = {
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */
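The two mt8188 reset tables added above pack a (register bank, bit) pair into a single u8 through MMSYS_RST_NR(), also defined outside these hunks. Given the values in use (banks 0-2, bits 0-31), a plausible encoding, not the verbatim definition, is:

/* assumed encoding, consistent with the u8 table entries above */
#define MMSYS_RST_NR(bank, bit)		((bank) * 32 + (bit))

with MT8188_VDO0_SW0_RST_B and MT8188_VDO1_SW0_RST_B presumably serving as the bank-0 reset register offsets of the two blocks.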
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
index a016d80b4bc1..7cafa2455fd0 100644
--- a/drivers/soc/mediatek/mt8192-mmsys.h
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -31,47 +31,36 @@
#define MT8192_DSI0_SEL_IN_DITHER0 0x1
static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
- MT8192_OVL0_MOUT_EN_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
- MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
- MT8192_OVL2_2L_MOUT_EN_RDMA4
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
- MT8192_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
- MT8192_RDMA0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
- MT8192_AAL0_SEL_IN_CCORR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
- MT8192_DSI0_SEL_IN_DITHER0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
- MT8192_RDMA0_SOUT_COLOR0
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
- MT8192_CCORR0_SOUT_AAL0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
- MT8192_DISP_OVL0_GO_BG
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
- MT8192_DISP_OVL0_2L_GO_BLEND
- }
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
+ MT8192_OVL0_MOUT_EN_DISP_RDMA0),
+ MMSYS_ROUTE(OVL_2L2, RDMA4,
+ MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
+ MT8192_OVL2_2L_MOUT_EN_RDMA4),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
+ MT8192_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
+ MT8192_RDMA0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
+ MT8192_AAL0_SEL_IN_CCORR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+ MT8192_DSI0_SEL_IN_DITHER0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
+ MT8192_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
+ MT8192_CCORR0_SOUT_AAL0),
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
+ MT8192_DISP_OVL0_GO_BG),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
+ MT8192_DISP_OVL0_2L_GO_BLEND),
};
#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h
index 9be2df2832a4..f69929a2a4d4 100644
--- a/drivers/soc/mediatek/mt8195-mmsys.h
+++ b/drivers/soc/mediatek/mt8195-mmsys.h
@@ -160,370 +160,278 @@
#define MT8195_SVPP3_MDP_RSZ BIT(5)
static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
- MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
- MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSI1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE
- }
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, OVL1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1),
+ MMSYS_ROUTE(OVL1, RDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1),
+ MMSYS_ROUTE(OVL1, OVL0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSI1),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = {
- {
- DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
- }, {
- DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
- }, {
- DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
- MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN1_SEL
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN2_SEL
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN3_SEL
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN4_SEL
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
- MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
- MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
- MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DPI1_SEL
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL
- }
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
index 7abaf048d91e..533a3fd0923b 100644
--- a/drivers/soc/mediatek/mt8365-mmsys.h
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -14,8 +14,9 @@
#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
+#define MT8365_DISP_MS_IN_OUT_MASK GENMASK(3, 0)
#define MT8365_RDMA0_SOUT_COLOR0 0x1
-#define MT8365_DITHER_MOUT_EN_DSI0 0x1
+#define MT8365_DITHER_MOUT_EN_DSI0 BIT(0)
#define MT8365_DSI0_SEL_IN_DITHER 0x1
#define MT8365_RDMA0_SEL_IN_OVL0 0x0
#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
@@ -27,56 +28,37 @@
#define MT8365_DPI0_SEL_IN_RDMA1 0x0
static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
- MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
- MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0
- },
- {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR,
- MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
- MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
- MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
- MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
- MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
- MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
- MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(COLOR0, CCORR,
+ MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DISP_COLOR_SEL_IN_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DITHER_MOUT_EN_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DSI0_SEL_IN_DITHER),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA1_SOUT_DPI0),
};
#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
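All of the routing tables converted above are consumed the same way: for a requested source/destination link the mmsys driver walks the table and performs a masked read-modify-write on every matching entry. A minimal sketch of that lookup, using a hypothetical helper name, plain MMIO accessors instead of the driver's real regmap/CMDQ paths, and the field names assumed in the MMSYS_ROUTE() sketch earlier:

#include <linux/io.h>
/* struct mtk_mmsys_routes / enum mtk_ddp_comp_id come from the driver's own headers */

/* Illustrative only: the helper name, accessors and field names are assumptions. */
static void mmsys_apply_routes_sketch(void __iomem *regs,
				      const struct mtk_mmsys_routes *routes,
				      unsigned int num_routes,
				      enum mtk_ddp_comp_id from,
				      enum mtk_ddp_comp_id to)
{
	unsigned int i;
	u32 reg;

	for (i = 0; i < num_routes; i++) {
		if (routes[i].from_comp != from || routes[i].to_comp != to)
			continue;
		/* clear only the bits covered by the route's mask, then set the selection */
		reg = readl(regs + routes[i].addr);
		reg &= ~routes[i].mask;
		reg |= routes[i].val;
		writel(reg, regs + routes[i].addr);
	}
}

This is also why the mt8365 entries above now carry an explicit MT8365_DISP_MS_IN_OUT_MASK rather than reusing each value as its own mask: the mask decides which bits are cleared before the selection value is written.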
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index b0cd071c4719..455221e8de24 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -12,9 +12,13 @@
#define CMDQ_WRITE_ENABLE_MASK BIT(0)
#define CMDQ_POLL_ENABLE_MASK BIT(0)
+/* dedicate the last GPR_R15 to hold the register address to be polled */
+#define CMDQ_POLL_ADDR_GPR (15)
#define CMDQ_EOC_IRQ_EN BIT(0)
+#define CMDQ_IMMEDIATE_VALUE 0
#define CMDQ_REG_TYPE 1
-#define CMDQ_JUMP_RELATIVE 1
+#define CMDQ_JUMP_RELATIVE 0
+#define CMDQ_JUMP_ABSOLUTE 1
struct cmdq_instruction {
union {
@@ -42,6 +46,16 @@ struct cmdq_instruction {
u8 op;
};
+static inline u8 cmdq_operand_get_type(struct cmdq_operand *op)
+{
+ return op->reg ? CMDQ_REG_TYPE : CMDQ_IMMEDIATE_VALUE;
+}
+
+static inline u16 cmdq_operand_get_idx_value(struct cmdq_operand *op)
+{
+ return op->reg ? op->idx : op->value;
+}
+
int cmdq_dev_get_client_reg(struct device *dev,
struct cmdq_client_reg *client_reg, int idx)
{
@@ -55,7 +69,7 @@ int cmdq_dev_get_client_reg(struct device *dev,
"mediatek,gce-client-reg",
3, idx, &spec);
if (err < 0) {
- dev_err(dev,
+ dev_warn(dev,
"error %d can't parse gce-client-reg property (%d)",
err, idx);
@@ -105,22 +119,16 @@ void cmdq_mbox_destroy(struct cmdq_client *client)
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
-struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
{
- struct cmdq_pkt *pkt;
struct device *dev;
dma_addr_t dma_addr;
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return ERR_PTR(-ENOMEM);
pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base) {
- kfree(pkt);
- return ERR_PTR(-ENOMEM);
- }
+ if (!pkt->va_base)
+ return -ENOMEM;
+
pkt->buf_size = size;
- pkt->cl = (void *)client;
dev = client->chan->mbox->dev;
dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
@@ -128,24 +136,20 @@ struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
kfree(pkt->va_base);
- kfree(pkt);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
pkt->pa_base = dma_addr;
- return pkt;
+ return 0;
}
EXPORT_SYMBOL(cmdq_pkt_create);
-void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt)
{
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
DMA_TO_DEVICE);
kfree(pkt->va_base);
- kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
@@ -176,15 +180,23 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
return 0;
}
-int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
+static int cmdq_pkt_mask(struct cmdq_pkt *pkt, u32 mask)
{
- struct cmdq_instruction inst;
-
- inst.op = CMDQ_CODE_WRITE;
- inst.value = value;
- inst.offset = offset;
- inst.subsys = subsys;
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_MASK,
+ .mask = ~mask
+ };
+ return cmdq_pkt_append_command(pkt, inst);
+}
+int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
+{
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WRITE,
+ .value = value,
+ .offset = offset,
+ .subsys = subsys
+ };
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
@@ -192,36 +204,30 @@ EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
- struct cmdq_instruction inst = { {0} };
u16 offset_mask = offset;
int err;
- if (mask != 0xffffffff) {
- inst.op = CMDQ_CODE_MASK;
- inst.mask = ~mask;
- err = cmdq_pkt_append_command(pkt, inst);
+ if (mask != GENMASK(31, 0)) {
+ err = cmdq_pkt_mask(pkt, mask);
if (err < 0)
return err;
offset_mask |= CMDQ_WRITE_ENABLE_MASK;
}
- err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
-
- return err;
+ return cmdq_pkt_write(pkt, subsys, offset_mask, value);
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
u16 reg_idx)
{
- struct cmdq_instruction inst = {};
-
- inst.op = CMDQ_CODE_READ_S;
- inst.dst_t = CMDQ_REG_TYPE;
- inst.sop = high_addr_reg_idx;
- inst.reg_dst = reg_idx;
- inst.src_reg = addr_low;
-
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_READ_S,
+ .dst_t = CMDQ_REG_TYPE,
+ .sop = high_addr_reg_idx,
+ .reg_dst = reg_idx,
+ .src_reg = addr_low
+ };
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);
@@ -229,14 +235,13 @@ EXPORT_SYMBOL(cmdq_pkt_read_s);
int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 src_reg_idx)
{
- struct cmdq_instruction inst = {};
-
- inst.op = CMDQ_CODE_WRITE_S;
- inst.src_t = CMDQ_REG_TYPE;
- inst.sop = high_addr_reg_idx;
- inst.offset = addr_low;
- inst.src_reg = src_reg_idx;
-
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WRITE_S,
+ .src_t = CMDQ_REG_TYPE,
+ .sop = high_addr_reg_idx,
+ .offset = addr_low,
+ .src_reg = src_reg_idx
+ };
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);
@@ -244,22 +249,19 @@ EXPORT_SYMBOL(cmdq_pkt_write_s);
int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 src_reg_idx, u32 mask)
{
- struct cmdq_instruction inst = {};
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WRITE_S_MASK,
+ .src_t = CMDQ_REG_TYPE,
+ .sop = high_addr_reg_idx,
+ .offset = addr_low,
+ .src_reg = src_reg_idx,
+ };
int err;
- inst.op = CMDQ_CODE_MASK;
- inst.mask = ~mask;
- err = cmdq_pkt_append_command(pkt, inst);
+ err = cmdq_pkt_mask(pkt, mask);
if (err < 0)
return err;
- inst.mask = 0;
- inst.op = CMDQ_CODE_WRITE_S_MASK;
- inst.src_t = CMDQ_REG_TYPE;
- inst.sop = high_addr_reg_idx;
- inst.offset = addr_low;
- inst.src_reg = src_reg_idx;
-
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
@@ -267,13 +269,12 @@ EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value)
{
- struct cmdq_instruction inst = {};
-
- inst.op = CMDQ_CODE_WRITE_S;
- inst.sop = high_addr_reg_idx;
- inst.offset = addr_low;
- inst.value = value;
-
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WRITE_S,
+ .sop = high_addr_reg_idx,
+ .offset = addr_low,
+ .value = value
+ };
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);
@@ -281,50 +282,89 @@ EXPORT_SYMBOL(cmdq_pkt_write_s_value);
int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value, u32 mask)
{
- struct cmdq_instruction inst = {};
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WRITE_S_MASK,
+ .sop = high_addr_reg_idx,
+ .offset = addr_low,
+ .value = value
+ };
int err;
- inst.op = CMDQ_CODE_MASK;
- inst.mask = ~mask;
- err = cmdq_pkt_append_command(pkt, inst);
+ err = cmdq_pkt_mask(pkt, mask);
if (err < 0)
return err;
- inst.op = CMDQ_CODE_WRITE_S_MASK;
- inst.sop = high_addr_reg_idx;
- inst.offset = addr_low;
- inst.value = value;
-
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
+int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr)
+{
+ const u16 high_addr_reg_idx = CMDQ_THR_SPR_IDX0;
+ const u16 value_reg_idx = CMDQ_THR_SPR_IDX1;
+ int ret;
+
+ /* read the value at src_addr into value_reg_idx */
+ ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(src_addr));
+ if (ret < 0)
+ return ret;
+ ret = cmdq_pkt_read_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(src_addr), value_reg_idx);
+ if (ret < 0)
+ return ret;
+
+ /* write the value of value_reg_idx into dst_addr */
+ ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(dst_addr));
+ if (ret < 0)
+ return ret;
+ ret = cmdq_pkt_write_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(dst_addr), value_reg_idx);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_mem_move);
+
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
- struct cmdq_instruction inst = { {0} };
u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WFE,
+ .value = CMDQ_WFE_OPTION | clear_option,
+ .event = event
+ };
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- inst.op = CMDQ_CODE_WFE;
- inst.value = CMDQ_WFE_OPTION | clear_option;
- inst.event = event;
-
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
-int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
+int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event)
{
- struct cmdq_instruction inst = { {0} };
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WFE,
+ .value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT,
+ .event = event
+ };
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- inst.op = CMDQ_CODE_WFE;
- inst.value = CMDQ_WFE_UPDATE;
- inst.event = event;
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_acquire_event);
+
+int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
+{
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WFE,
+ .value = CMDQ_WFE_UPDATE,
+ .event = event
+ };
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
return cmdq_pkt_append_command(pkt, inst);
}
@@ -332,15 +372,15 @@ EXPORT_SYMBOL(cmdq_pkt_clear_event);
int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
- struct cmdq_instruction inst = {};
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_WFE,
+ .value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE,
+ .event = event
+ };
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- inst.op = CMDQ_CODE_WFE;
- inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
- inst.event = event;
-
return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);
@@ -348,97 +388,141 @@ EXPORT_SYMBOL(cmdq_pkt_set_event);
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value)
{
- struct cmdq_instruction inst = { {0} };
- int err;
-
- inst.op = CMDQ_CODE_POLL;
- inst.value = value;
- inst.offset = offset;
- inst.subsys = subsys;
- err = cmdq_pkt_append_command(pkt, inst);
-
- return err;
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_POLL,
+ .value = value,
+ .offset = offset,
+ .subsys = subsys
+ };
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_poll);
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
- struct cmdq_instruction inst = { {0} };
int err;
- inst.op = CMDQ_CODE_MASK;
- inst.mask = ~mask;
- err = cmdq_pkt_append_command(pkt, inst);
+ err = cmdq_pkt_mask(pkt, mask);
if (err < 0)
return err;
offset = offset | CMDQ_POLL_ENABLE_MASK;
- err = cmdq_pkt_poll(pkt, subsys, offset, value);
-
- return err;
+ return cmdq_pkt_poll(pkt, subsys, offset, value);
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
-int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
+int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
{
- struct cmdq_instruction inst = {};
+ struct cmdq_instruction inst = { {0} };
+ u8 use_mask = 0;
+ int ret;
+
+ /*
+ * Append a MASK instruction to set the mask for the following POLL
+ * instruction, which enables the use_mask bit.
+ */
+ if (mask != GENMASK(31, 0)) {
+ ret = cmdq_pkt_mask(pkt, mask);
+ if (ret < 0)
+ return ret;
+ use_mask = CMDQ_POLL_ENABLE_MASK;
+ }
- inst.op = CMDQ_CODE_LOGIC;
+ /*
+ * POLL is a legacy operation in GCE and it supports neither SPR nor CMDQ_CODE_LOGIC,
+ * so cmdq_pkt_assign cannot be used to keep the polling register address in an SPR.
+ * If the user wants to poll a register address which doesn't have a subsys id,
+ * a GPR and CMDQ_CODE_MASK must be used to move the polling register address into the GPR.
+ */
+ inst.op = CMDQ_CODE_MASK;
inst.dst_t = CMDQ_REG_TYPE;
- inst.reg_dst = reg_idx;
+ inst.sop = CMDQ_POLL_ADDR_GPR;
+ inst.value = addr;
+ ret = cmdq_pkt_append_command(pkt, inst);
+ if (ret < 0)
+ return ret;
+
+ /* Append a POLL instruction to poll the register address previously assigned to the GPR. */
+ inst.op = CMDQ_CODE_POLL;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.sop = CMDQ_POLL_ADDR_GPR;
+ inst.offset = use_mask;
inst.value = value;
- return cmdq_pkt_append_command(pkt, inst);
+ ret = cmdq_pkt_append_command(pkt, inst);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
-EXPORT_SYMBOL(cmdq_pkt_assign);
+EXPORT_SYMBOL(cmdq_pkt_poll_addr);
-int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
+ struct cmdq_operand *left_operand,
+ enum cmdq_logic_op s_op,
+ struct cmdq_operand *right_operand)
{
- struct cmdq_instruction inst = {};
+ struct cmdq_instruction inst;
+
+ if (!left_operand || !right_operand || s_op >= CMDQ_LOGIC_MAX)
+ return -EINVAL;
+
+ inst.value = 0;
+ inst.op = CMDQ_CODE_LOGIC;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.src_t = cmdq_operand_get_type(left_operand);
+ inst.arg_c_t = cmdq_operand_get_type(right_operand);
+ inst.sop = s_op;
+ inst.reg_dst = result_reg_idx;
+ inst.src_reg = cmdq_operand_get_idx_value(left_operand);
+ inst.arg_c = cmdq_operand_get_idx_value(right_operand);
- inst.op = CMDQ_CODE_JUMP;
- inst.offset = CMDQ_JUMP_RELATIVE;
- inst.value = addr >>
- cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
return cmdq_pkt_append_command(pkt, inst);
}
-EXPORT_SYMBOL(cmdq_pkt_jump);
+EXPORT_SYMBOL(cmdq_pkt_logic_command);
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
- struct cmdq_instruction inst = { {0} };
- int err;
-
- /* insert EOC and generate IRQ for each command iteration */
- inst.op = CMDQ_CODE_EOC;
- inst.value = CMDQ_EOC_IRQ_EN;
- err = cmdq_pkt_append_command(pkt, inst);
- if (err < 0)
- return err;
-
- /* JUMP to end */
- inst.op = CMDQ_CODE_JUMP;
- inst.value = CMDQ_JUMP_PASS >>
- cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
- err = cmdq_pkt_append_command(pkt, inst);
-
- return err;
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_LOGIC,
+ .dst_t = CMDQ_REG_TYPE,
+ .reg_dst = reg_idx,
+ .value = value
+ };
+ return cmdq_pkt_append_command(pkt, inst);
}
-EXPORT_SYMBOL(cmdq_pkt_finalize);
+EXPORT_SYMBOL(cmdq_pkt_assign);
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
+int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
{
- int err;
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_JUMP,
+ .offset = CMDQ_JUMP_ABSOLUTE,
+ .value = addr >> shift_pa
+ };
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_jump_abs);
- err = mbox_send_message(client->chan, pkt);
- if (err < 0)
- return err;
- /* We can send next packet immediately, so just call txdone. */
- mbox_client_txdone(client->chan, 0);
+int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
+{
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_JUMP,
+ .value = (u32)offset >> shift_pa
+ };
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_jump_rel);
- return 0;
+int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
+{
+ struct cmdq_instruction inst = {
+ .op = CMDQ_CODE_EOC,
+ .value = CMDQ_EOC_IRQ_EN
+ };
+ return cmdq_pkt_append_command(pkt, inst);
}
-EXPORT_SYMBOL(cmdq_pkt_flush_async);
+EXPORT_SYMBOL(cmdq_pkt_eoc);
+MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index b28feb967540..f54c966138b5 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -273,32 +273,39 @@ static int mtk_devapc_probe(struct platform_device *pdev)
return -EINVAL;
devapc_irq = irq_of_parse_and_map(node, 0);
- if (!devapc_irq)
- return -EINVAL;
+ if (!devapc_irq) {
+ ret = -EINVAL;
+ goto err;
+ }
ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
- if (IS_ERR(ctx->infra_clk))
- return -EINVAL;
+ if (IS_ERR(ctx->infra_clk)) {
+ ret = -EINVAL;
+ goto err;
+ }
ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
IRQF_TRIGGER_NONE, "devapc", ctx);
if (ret)
- return ret;
+ goto err;
platform_set_drvdata(pdev, ctx);
start_devapc(ctx);
return 0;
+
+err:
+ iounmap(ctx->infra_base);
+ return ret;
}
-static int mtk_devapc_remove(struct platform_device *pdev)
+static void mtk_devapc_remove(struct platform_device *pdev)
{
struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
stop_devapc(ctx);
-
- return 0;
+ iounmap(ctx->infra_base);
}
static struct platform_driver mtk_devapc_driver = {
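For reference, the manual iounmap() in both the probe error path and the new void remove() callback could also be avoided by letting devres own the mapping. A hypothetical variant, not what this patch does, assuming infra_base is currently mapped with of_iomap():

static int mtk_devapc_probe_devres(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct mtk_devapc_context *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Unmapped automatically on probe failure and on device removal. */
	ctx->infra_base = devm_of_iomap(&pdev->dev, node, 0, NULL);
	if (IS_ERR(ctx->infra_base))
		return PTR_ERR(ctx->infra_base);

	/* ... remaining setup as above, without the goto err cleanup ... */
	return 0;
}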
diff --git a/drivers/soc/mediatek/mtk-dvfsrc.c b/drivers/soc/mediatek/mtk-dvfsrc.c
new file mode 100644
index 000000000000..41add5636b03
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-dvfsrc.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/dvfsrc.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/* DVFSRC_LEVEL */
+#define DVFSRC_V1_LEVEL_TARGET_LEVEL GENMASK(15, 0)
+#define DVFSRC_TGT_LEVEL_IDLE 0x00
+#define DVFSRC_V1_LEVEL_CURRENT_LEVEL GENMASK(31, 16)
+
+/* DVFSRC_SW_REQ, DVFSRC_SW_REQ2 */
+#define DVFSRC_V1_SW_REQ2_DRAM_LEVEL GENMASK(1, 0)
+#define DVFSRC_V1_SW_REQ2_VCORE_LEVEL GENMASK(3, 2)
+
+#define DVFSRC_V2_SW_REQ_DRAM_LEVEL GENMASK(3, 0)
+#define DVFSRC_V2_SW_REQ_VCORE_LEVEL GENMASK(6, 4)
+
+/* DVFSRC_VCORE */
+#define DVFSRC_V2_VCORE_REQ_VSCP_LEVEL GENMASK(14, 12)
+
+#define DVFSRC_POLL_TIMEOUT_US 1000
+#define STARTUP_TIME_US 1
+
+#define MTK_SIP_DVFSRC_INIT 0x0
+#define MTK_SIP_DVFSRC_START 0x1
+
+struct dvfsrc_bw_constraints {
+ u16 max_dram_nom_bw;
+ u16 max_dram_peak_bw;
+ u16 max_dram_hrt_bw;
+};
+
+struct dvfsrc_opp {
+ u32 vcore_opp;
+ u32 dram_opp;
+};
+
+struct dvfsrc_opp_desc {
+ const struct dvfsrc_opp *opps;
+ u32 num_opp;
+};
+
+struct dvfsrc_soc_data;
+struct mtk_dvfsrc {
+ struct device *dev;
+ struct platform_device *icc;
+ struct platform_device *regulator;
+ const struct dvfsrc_soc_data *dvd;
+ const struct dvfsrc_opp_desc *curr_opps;
+ void __iomem *regs;
+ int dram_type;
+};
+
+struct dvfsrc_soc_data {
+ const int *regs;
+ const struct dvfsrc_opp_desc *opps_desc;
+ u32 (*get_target_level)(struct mtk_dvfsrc *dvfsrc);
+ u32 (*get_current_level)(struct mtk_dvfsrc *dvfsrc);
+ u32 (*get_vcore_level)(struct mtk_dvfsrc *dvfsrc);
+ u32 (*get_vscp_level)(struct mtk_dvfsrc *dvfsrc);
+ void (*set_dram_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw);
+ void (*set_dram_peak_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw);
+ void (*set_dram_hrt_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw);
+ void (*set_opp_level)(struct mtk_dvfsrc *dvfsrc, u32 level);
+ void (*set_vcore_level)(struct mtk_dvfsrc *dvfsrc, u32 level);
+ void (*set_vscp_level)(struct mtk_dvfsrc *dvfsrc, u32 level);
+ int (*wait_for_opp_level)(struct mtk_dvfsrc *dvfsrc, u32 level);
+ int (*wait_for_vcore_level)(struct mtk_dvfsrc *dvfsrc, u32 level);
+ const struct dvfsrc_bw_constraints *bw_constraints;
+};
+
+static u32 dvfsrc_readl(struct mtk_dvfsrc *dvfs, u32 offset)
+{
+ return readl(dvfs->regs + dvfs->dvd->regs[offset]);
+}
+
+static void dvfsrc_writel(struct mtk_dvfsrc *dvfs, u32 offset, u32 val)
+{
+ writel(val, dvfs->regs + dvfs->dvd->regs[offset]);
+}
+
+enum dvfsrc_regs {
+ DVFSRC_SW_REQ,
+ DVFSRC_SW_REQ2,
+ DVFSRC_LEVEL,
+ DVFSRC_TARGET_LEVEL,
+ DVFSRC_SW_BW,
+ DVFSRC_SW_PEAK_BW,
+ DVFSRC_SW_HRT_BW,
+ DVFSRC_VCORE,
+ DVFSRC_REGS_MAX,
+};
+
+static const int dvfsrc_mt8183_regs[] = {
+ [DVFSRC_SW_REQ] = 0x4,
+ [DVFSRC_SW_REQ2] = 0x8,
+ [DVFSRC_LEVEL] = 0xDC,
+ [DVFSRC_SW_BW] = 0x160,
+};
+
+static const int dvfsrc_mt8195_regs[] = {
+ [DVFSRC_SW_REQ] = 0xc,
+ [DVFSRC_VCORE] = 0x6c,
+ [DVFSRC_SW_PEAK_BW] = 0x278,
+ [DVFSRC_SW_BW] = 0x26c,
+ [DVFSRC_SW_HRT_BW] = 0x290,
+ [DVFSRC_LEVEL] = 0xd44,
+ [DVFSRC_TARGET_LEVEL] = 0xd48,
+};
+
+static const struct dvfsrc_opp *dvfsrc_get_current_opp(struct mtk_dvfsrc *dvfsrc)
+{
+ u32 level = dvfsrc->dvd->get_current_level(dvfsrc);
+
+ return &dvfsrc->curr_opps->opps[level];
+}
+
+static bool dvfsrc_is_idle(struct mtk_dvfsrc *dvfsrc)
+{
+ if (!dvfsrc->dvd->get_target_level)
+ return true;
+
+ return dvfsrc->dvd->get_target_level(dvfsrc) == DVFSRC_TGT_LEVEL_IDLE;
+}
+
+static int dvfsrc_wait_for_vcore_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level)
+{
+ const struct dvfsrc_opp *curr;
+
+ return readx_poll_timeout_atomic(dvfsrc_get_current_opp, dvfsrc, curr,
+ curr->vcore_opp >= level, STARTUP_TIME_US,
+ DVFSRC_POLL_TIMEOUT_US);
+}
+
+static int dvfsrc_wait_for_opp_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level)
+{
+ const struct dvfsrc_opp *target, *curr;
+ int ret;
+
+ target = &dvfsrc->curr_opps->opps[level];
+ ret = readx_poll_timeout_atomic(dvfsrc_get_current_opp, dvfsrc, curr,
+ curr->dram_opp >= target->dram_opp &&
+ curr->vcore_opp >= target->vcore_opp,
+ STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ dev_warn(dvfsrc->dev,
+ "timeout! target OPP: %u, dram: %d, vcore: %d\n", level,
+ curr->dram_opp, curr->vcore_opp);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dvfsrc_wait_for_opp_level_v2(struct mtk_dvfsrc *dvfsrc, u32 level)
+{
+ const struct dvfsrc_opp *target, *curr;
+ int ret;
+
+ target = &dvfsrc->curr_opps->opps[level];
+ ret = readx_poll_timeout_atomic(dvfsrc_get_current_opp, dvfsrc, curr,
+ curr->dram_opp >= target->dram_opp &&
+ curr->vcore_opp >= target->vcore_opp,
+ STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ dev_warn(dvfsrc->dev,
+ "timeout! target OPP: %u, dram: %d\n", level, curr->dram_opp);
+ return ret;
+ }
+
+ return 0;
+}
+
+static u32 dvfsrc_get_target_level_v1(struct mtk_dvfsrc *dvfsrc)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL);
+
+ return FIELD_GET(DVFSRC_V1_LEVEL_TARGET_LEVEL, val);
+}
+
+static u32 dvfsrc_get_current_level_v1(struct mtk_dvfsrc *dvfsrc)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL);
+ u32 current_level = FIELD_GET(DVFSRC_V1_LEVEL_CURRENT_LEVEL, val);
+
+ return ffs(current_level) - 1;
+}
+
+static u32 dvfsrc_get_target_level_v2(struct mtk_dvfsrc *dvfsrc)
+{
+ return dvfsrc_readl(dvfsrc, DVFSRC_TARGET_LEVEL);
+}
+
+static u32 dvfsrc_get_current_level_v2(struct mtk_dvfsrc *dvfsrc)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL);
+ u32 level = ffs(val);
+
+ /* Valid levels */
+ if (level < dvfsrc->curr_opps->num_opp)
+ return dvfsrc->curr_opps->num_opp - level;
+
+ /* Zero for level 0 or invalid level */
+ return 0;
+}
+
+static u32 dvfsrc_get_vcore_level_v1(struct mtk_dvfsrc *dvfsrc)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ2);
+
+ return FIELD_GET(DVFSRC_V1_SW_REQ2_VCORE_LEVEL, val);
+}
+
+static void dvfsrc_set_vcore_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ2);
+
+ val &= ~DVFSRC_V1_SW_REQ2_VCORE_LEVEL;
+ val |= FIELD_PREP(DVFSRC_V1_SW_REQ2_VCORE_LEVEL, level);
+
+ dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ2, val);
+}
+
+static u32 dvfsrc_get_vcore_level_v2(struct mtk_dvfsrc *dvfsrc)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ);
+
+ return FIELD_GET(DVFSRC_V2_SW_REQ_VCORE_LEVEL, val);
+}
+
+static void dvfsrc_set_vcore_level_v2(struct mtk_dvfsrc *dvfsrc, u32 level)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ);
+
+ val &= ~DVFSRC_V2_SW_REQ_VCORE_LEVEL;
+ val |= FIELD_PREP(DVFSRC_V2_SW_REQ_VCORE_LEVEL, level);
+
+ dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ, val);
+}
+
+static u32 dvfsrc_get_vscp_level_v2(struct mtk_dvfsrc *dvfsrc)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_VCORE);
+
+ return FIELD_GET(DVFSRC_V2_VCORE_REQ_VSCP_LEVEL, val);
+}
+
+static void dvfsrc_set_vscp_level_v2(struct mtk_dvfsrc *dvfsrc, u32 level)
+{
+ u32 val = dvfsrc_readl(dvfsrc, DVFSRC_VCORE);
+
+ val &= ~DVFSRC_V2_VCORE_REQ_VSCP_LEVEL;
+ val |= FIELD_PREP(DVFSRC_V2_VCORE_REQ_VSCP_LEVEL, level);
+
+ dvfsrc_writel(dvfsrc, DVFSRC_VCORE, val);
+}
+
+static void __dvfsrc_set_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, u32 reg,
+ u16 max_bw, u16 min_bw, u64 bw)
+{
+ u32 new_bw = (u32)div_u64(bw, 100 * 1000);
+
+ /* If bw constraints (in mbps) are defined, make sure to respect them */
+ if (max_bw)
+ new_bw = min(new_bw, max_bw);
+ if (min_bw && new_bw > 0)
+ new_bw = max(new_bw, min_bw);
+
+ dvfsrc_writel(dvfsrc, reg, new_bw);
+}
+
+static void dvfsrc_set_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw)
+{
+ u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_nom_bw;
+
+ __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_BW, max_bw, 0, bw);
+};
+
+static void dvfsrc_set_dram_peak_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw)
+{
+ u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_peak_bw;
+
+ __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_PEAK_BW, max_bw, 0, bw);
+}
+
+static void dvfsrc_set_dram_hrt_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw)
+{
+ u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_hrt_bw;
+
+ __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_HRT_BW, max_bw, 0, bw);
+}
+
+static void dvfsrc_set_opp_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level)
+{
+ const struct dvfsrc_opp *opp = &dvfsrc->curr_opps->opps[level];
+ u32 val;
+
+ /* Translate the Pstate to a DVFSRC level and write it to the DVFSRC HW */
+ val = FIELD_PREP(DVFSRC_V1_SW_REQ2_DRAM_LEVEL, opp->dram_opp);
+ val |= FIELD_PREP(DVFSRC_V1_SW_REQ2_VCORE_LEVEL, opp->vcore_opp);
+
+ dev_dbg(dvfsrc->dev, "vcore_opp: %d, dram_opp: %d\n", opp->vcore_opp, opp->dram_opp);
+ dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ, val);
+}
+
+int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data)
+{
+ struct mtk_dvfsrc *dvfsrc = dev_get_drvdata(dev);
+ bool state;
+ int ret;
+
+ dev_dbg(dvfsrc->dev, "cmd: %d, data: %llu\n", cmd, data);
+
+ switch (cmd) {
+ case MTK_DVFSRC_CMD_BW:
+ dvfsrc->dvd->set_dram_bw(dvfsrc, data);
+ return 0;
+ case MTK_DVFSRC_CMD_HRT_BW:
+ if (dvfsrc->dvd->set_dram_hrt_bw)
+ dvfsrc->dvd->set_dram_hrt_bw(dvfsrc, data);
+ return 0;
+ case MTK_DVFSRC_CMD_PEAK_BW:
+ if (dvfsrc->dvd->set_dram_peak_bw)
+ dvfsrc->dvd->set_dram_peak_bw(dvfsrc, data);
+ return 0;
+ case MTK_DVFSRC_CMD_OPP:
+ if (!dvfsrc->dvd->set_opp_level)
+ return 0;
+
+ dvfsrc->dvd->set_opp_level(dvfsrc, data);
+ break;
+ case MTK_DVFSRC_CMD_VCORE_LEVEL:
+ dvfsrc->dvd->set_vcore_level(dvfsrc, data);
+ break;
+ case MTK_DVFSRC_CMD_VSCP_LEVEL:
+ if (!dvfsrc->dvd->set_vscp_level)
+ return 0;
+
+ dvfsrc->dvd->set_vscp_level(dvfsrc, data);
+ break;
+ default:
+ dev_err(dvfsrc->dev, "unknown command: %d\n", cmd);
+ return -EOPNOTSUPP;
+ }
+
+ /* DVFSRC needs at least 2T (~196ns) to handle a request */
+ udelay(STARTUP_TIME_US);
+
+ ret = readx_poll_timeout_atomic(dvfsrc_is_idle, dvfsrc, state, state,
+ STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ dev_warn(dvfsrc->dev,
+ "%d: idle timeout, data: %llu, last: %d -> %d\n", cmd, data,
+ dvfsrc->dvd->get_current_level(dvfsrc),
+ dvfsrc->dvd->get_target_level(dvfsrc));
+ return ret;
+ }
+
+ if (cmd == MTK_DVFSRC_CMD_OPP)
+ ret = dvfsrc->dvd->wait_for_opp_level(dvfsrc, data);
+ else
+ ret = dvfsrc->dvd->wait_for_vcore_level(dvfsrc, data);
+
+ if (ret < 0) {
+ dev_warn(dvfsrc->dev,
+ "%d: wait timeout, data: %llu, last: %d -> %d\n",
+ cmd, data,
+ dvfsrc->dvd->get_current_level(dvfsrc),
+ dvfsrc->dvd->get_target_level(dvfsrc));
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_dvfsrc_send_request);
+
+int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data)
+{
+ struct mtk_dvfsrc *dvfsrc = dev_get_drvdata(dev);
+
+ switch (cmd) {
+ case MTK_DVFSRC_CMD_VCORE_LEVEL:
+ *data = dvfsrc->dvd->get_vcore_level(dvfsrc);
+ break;
+ case MTK_DVFSRC_CMD_VSCP_LEVEL:
+ *data = dvfsrc->dvd->get_vscp_level(dvfsrc);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_dvfsrc_query_info);
+
+static int mtk_dvfsrc_probe(struct platform_device *pdev)
+{
+ struct arm_smccc_res ares;
+ struct mtk_dvfsrc *dvfsrc;
+ int ret;
+
+ dvfsrc = devm_kzalloc(&pdev->dev, sizeof(*dvfsrc), GFP_KERNEL);
+ if (!dvfsrc)
+ return -ENOMEM;
+
+ dvfsrc->dvd = of_device_get_match_data(&pdev->dev);
+ dvfsrc->dev = &pdev->dev;
+
+ dvfsrc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(dvfsrc->regs))
+ return PTR_ERR(dvfsrc->regs);
+
+ arm_smccc_smc(MTK_SIP_DVFSRC_VCOREFS_CONTROL, MTK_SIP_DVFSRC_INIT,
+ 0, 0, 0, 0, 0, 0, &ares);
+ if (ares.a0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "DVFSRC init failed: %lu\n", ares.a0);
+
+ dvfsrc->dram_type = ares.a1;
+ dev_dbg(&pdev->dev, "DRAM Type: %d\n", dvfsrc->dram_type);
+
+ dvfsrc->curr_opps = &dvfsrc->dvd->opps_desc[dvfsrc->dram_type];
+ platform_set_drvdata(pdev, dvfsrc);
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to populate child devices\n");
+
+ /* Everything is set up - make it run! */
+ arm_smccc_smc(MTK_SIP_DVFSRC_VCOREFS_CONTROL, MTK_SIP_DVFSRC_START,
+ 0, 0, 0, 0, 0, 0, &ares);
+ if (ares.a0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "Cannot start DVFSRC: %lu\n", ares.a0);
+
+ return 0;
+}
+
+static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v1 = { 0, 0, 0 };
+static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v2 = {
+ .max_dram_nom_bw = 255,
+ .max_dram_peak_bw = 255,
+ .max_dram_hrt_bw = 1023,
+};
+
+static const struct dvfsrc_opp dvfsrc_opp_mt6893_lp4[] = {
+ { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 },
+ { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
+ { 0, 2 }, { 1, 2 }, { 2, 2 }, { 3, 2 },
+ { 0, 3 }, { 1, 3 }, { 2, 3 }, { 3, 3 },
+ { 1, 4 }, { 2, 4 }, { 3, 4 }, { 2, 5 },
+ { 3, 5 }, { 3, 6 }, { 4, 6 }, { 4, 7 },
+};
+
+static const struct dvfsrc_opp_desc dvfsrc_opp_mt6893_desc[] = {
+ [0] = {
+ .opps = dvfsrc_opp_mt6893_lp4,
+ .num_opp = ARRAY_SIZE(dvfsrc_opp_mt6893_lp4),
+ }
+};
+
+static const struct dvfsrc_soc_data mt6893_data = {
+ .opps_desc = dvfsrc_opp_mt6893_desc,
+ .regs = dvfsrc_mt8195_regs,
+ .get_target_level = dvfsrc_get_target_level_v2,
+ .get_current_level = dvfsrc_get_current_level_v2,
+ .get_vcore_level = dvfsrc_get_vcore_level_v2,
+ .get_vscp_level = dvfsrc_get_vscp_level_v2,
+ .set_dram_bw = dvfsrc_set_dram_bw_v1,
+ .set_dram_peak_bw = dvfsrc_set_dram_peak_bw_v1,
+ .set_dram_hrt_bw = dvfsrc_set_dram_hrt_bw_v1,
+ .set_vcore_level = dvfsrc_set_vcore_level_v2,
+ .set_vscp_level = dvfsrc_set_vscp_level_v2,
+ .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2,
+ .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
+ .bw_constraints = &dvfsrc_bw_constr_v2,
+};
+
+static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp4[] = {
+ { 0, 0 }, { 0, 1 }, { 0, 2 }, { 1, 2 },
+};
+
+static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp3[] = {
+ { 0, 0 }, { 0, 1 }, { 1, 1 }, { 1, 2 },
+};
+
+static const struct dvfsrc_opp_desc dvfsrc_opp_mt8183_desc[] = {
+ [0] = {
+ .opps = dvfsrc_opp_mt8183_lp4,
+ .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8183_lp4),
+ },
+ [1] = {
+ .opps = dvfsrc_opp_mt8183_lp3,
+ .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8183_lp3),
+ },
+ [2] = {
+ .opps = dvfsrc_opp_mt8183_lp3,
+ .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8183_lp3),
+ }
+};
+
+static const struct dvfsrc_soc_data mt8183_data = {
+ .opps_desc = dvfsrc_opp_mt8183_desc,
+ .regs = dvfsrc_mt8183_regs,
+ .get_target_level = dvfsrc_get_target_level_v1,
+ .get_current_level = dvfsrc_get_current_level_v1,
+ .get_vcore_level = dvfsrc_get_vcore_level_v1,
+ .set_dram_bw = dvfsrc_set_dram_bw_v1,
+ .set_opp_level = dvfsrc_set_opp_level_v1,
+ .set_vcore_level = dvfsrc_set_vcore_level_v1,
+ .wait_for_opp_level = dvfsrc_wait_for_opp_level_v1,
+ .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
+ .bw_constraints = &dvfsrc_bw_constr_v1,
+};
+
+static const struct dvfsrc_opp dvfsrc_opp_mt8195_lp4[] = {
+ { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 },
+ { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
+ { 0, 2 }, { 1, 2 }, { 2, 2 }, { 3, 2 },
+ { 1, 3 }, { 2, 3 }, { 3, 3 }, { 1, 4 },
+ { 2, 4 }, { 3, 4 }, { 2, 5 }, { 3, 5 },
+ { 3, 6 },
+};
+
+static const struct dvfsrc_opp_desc dvfsrc_opp_mt8195_desc[] = {
+ [0] = {
+ .opps = dvfsrc_opp_mt8195_lp4,
+ .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8195_lp4),
+ }
+};
+
+static const struct dvfsrc_soc_data mt8195_data = {
+ .opps_desc = dvfsrc_opp_mt8195_desc,
+ .regs = dvfsrc_mt8195_regs,
+ .get_target_level = dvfsrc_get_target_level_v2,
+ .get_current_level = dvfsrc_get_current_level_v2,
+ .get_vcore_level = dvfsrc_get_vcore_level_v2,
+ .get_vscp_level = dvfsrc_get_vscp_level_v2,
+ .set_dram_bw = dvfsrc_set_dram_bw_v1,
+ .set_dram_peak_bw = dvfsrc_set_dram_peak_bw_v1,
+ .set_dram_hrt_bw = dvfsrc_set_dram_hrt_bw_v1,
+ .set_vcore_level = dvfsrc_set_vcore_level_v2,
+ .set_vscp_level = dvfsrc_set_vscp_level_v2,
+ .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2,
+ .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
+ .bw_constraints = &dvfsrc_bw_constr_v2,
+};
+
+static const struct of_device_id mtk_dvfsrc_of_match[] = {
+ { .compatible = "mediatek,mt6893-dvfsrc", .data = &mt6893_data },
+ { .compatible = "mediatek,mt8183-dvfsrc", .data = &mt8183_data },
+ { .compatible = "mediatek,mt8195-dvfsrc", .data = &mt8195_data },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mtk_dvfsrc_driver = {
+ .probe = mtk_dvfsrc_probe,
+ .driver = {
+ .name = "mtk-dvfsrc",
+ .of_match_table = mtk_dvfsrc_of_match,
+ },
+};
+module_platform_driver(mtk_dvfsrc_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_AUTHOR("Dawei Chien <dawei.chien@mediatek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MediaTek DVFSRC driver");
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index ffb75711a1da..bb4639ca0b8c 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -87,6 +87,29 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
.clk_driver = "clk-mt8188-vdo0",
.routes = mmsys_mt8188_routing_table,
.num_routes = ARRAY_SIZE(mmsys_mt8188_routing_table),
+ .sw0_rst_offset = MT8188_VDO0_SW0_RST_B,
+ .rst_tb = mmsys_mt8188_vdo0_rst_tb,
+ .num_resets = ARRAY_SIZE(mmsys_mt8188_vdo0_rst_tb),
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vdosys1_driver_data = {
+ .clk_driver = "clk-mt8188-vdo1",
+ .routes = mmsys_mt8188_vdo1_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8188_vdo1_routing_table),
+ .sw0_rst_offset = MT8188_VDO1_SW0_RST_B,
+ .rst_tb = mmsys_mt8188_vdo1_rst_tb,
+ .num_resets = ARRAY_SIZE(mmsys_mt8188_vdo1_rst_tb),
+ .vsync_len = 1,
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vppsys0_driver_data = {
+ .clk_driver = "clk-mt8188-vpp0",
+ .is_vppsys = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vppsys1_driver_data = {
+ .clk_driver = "clk-mt8188-vpp1",
+ .is_vppsys = true,
};
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
@@ -169,6 +192,10 @@ void mtk_mmsys_ddp_connect(struct device *dev,
if (cur == routes[i].from_comp && next == routes[i].to_comp)
mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask,
routes[i].val, NULL);
+
+ if (mmsys->data->vsync_len)
+ mtk_mmsys_update_bits(mmsys, MT8188_VDO1_MIXER_VSYNC_LEN, GENMASK(31, 0),
+ mmsys->data->vsync_len, NULL);
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
@@ -209,6 +236,7 @@ void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16
mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_ALPHA + (idx - 1) * 4, ~0,
alpha << 16 | alpha, cmdq_pkt);
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(15 + idx), 0, cmdq_pkt);
mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(19 + idx),
alpha_sel << (19 + idx), cmdq_pkt);
mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4,
@@ -302,6 +330,15 @@ static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned l
u32 offset;
u32 reg;
+ if (mmsys->data->rst_tb) {
+ if (id >= mmsys->data->num_resets) {
+ dev_err(rcdev->dev, "Invalid reset ID: %lu (>=%u)\n",
+ id, mmsys->data->num_resets);
+ return -EINVAL;
+ }
+ id = mmsys->data->rst_tb[id];
+ }
+
offset = (id / MMSYS_SW_RESET_PER_REG) * sizeof(u32);
id = id % MMSYS_SW_RESET_PER_REG;
reg = mmsys->data->sw0_rst_offset + offset;
@@ -410,14 +447,12 @@ out_probe_done:
return 0;
}
-static int mtk_mmsys_remove(struct platform_device *pdev)
+static void mtk_mmsys_remove(struct platform_device *pdev)
{
struct mtk_mmsys *mmsys = platform_get_drvdata(pdev);
platform_device_unregister(mmsys->drm_pdev);
platform_device_unregister(mmsys->clks_pdev);
-
- return 0;
}
static const struct of_device_id of_match_mtk_mmsys[] = {
@@ -431,6 +466,9 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
{ .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data },
{ .compatible = "mediatek,mt8186-mmsys", .data = &mt8186_mmsys_driver_data },
{ .compatible = "mediatek,mt8188-vdosys0", .data = &mt8188_vdosys0_driver_data },
+ { .compatible = "mediatek,mt8188-vdosys1", .data = &mt8188_vdosys1_driver_data },
+ { .compatible = "mediatek,mt8188-vppsys0", .data = &mt8188_vppsys0_driver_data },
+ { .compatible = "mediatek,mt8188-vppsys1", .data = &mt8188_vppsys1_driver_data },
{ .compatible = "mediatek,mt8192-mmsys", .data = &mt8192_mmsys_driver_data },
/* "mediatek,mt8195-mmsys" compatible is deprecated */
{ .compatible = "mediatek,mt8195-mmsys", .data = &mt8195_vdosys0_driver_data },
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index 6725403d2e3a..fe628d5f5198 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -78,6 +78,22 @@
#define DSI_SEL_IN_RDMA 0x1
#define DSI_SEL_IN_MASK 0x1
+#define MMSYS_RST_NR(bank, bit) (((bank) * 32) + (bit))
+
+/*
+ * This macro adds a compile-time check to make sure that the in/out
+ * selection bit(s) fit in the register mask; it is similar to the
+ * bitfield macros, but it does not transform the value.
+ */
+#define MMSYS_ROUTE(from, to, reg_addr, reg_mask, selection) \
+ { DDP_COMPONENT_##from, DDP_COMPONENT_##to, reg_addr, reg_mask, \
+ (__BUILD_BUG_ON_ZERO_MSG((reg_mask) == 0, "Invalid mask") + \
+ __BUILD_BUG_ON_ZERO_MSG(~(reg_mask) & (selection), \
+ #selection " does not fit in " \
+ #reg_mask) + \
+ (selection)) \
+ }
+
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
@@ -86,13 +102,43 @@ struct mtk_mmsys_routes {
u32 val;
};
+/**
+ * struct mtk_mmsys_driver_data - Settings of the mmsys
+ * @clk_driver: Clock driver name that the mmsys is using
+ * (defined in drivers/clk/mediatek/clk-*.c).
+ * @routes: Routing table of the mmsys.
+ * It provides mux settings from one module to another.
+ * @num_routes: Array size of the routes.
+ * @sw0_rst_offset: Register offset for the reset control.
+ * @num_resets: Number of reset bits that are defined.
+ * @is_vppsys: Whether the mmsys is VPPSYS (Video Processing Pipe)
+ * or VDOSYS (Video). Only VDOSYS needs to be added to the DRM driver.
+ * @vsync_len: VSYNC length of the MIXER.
+ * VSYNC is usually triggered by the connector, so its length is a
+ * fixed value when the frame rate is decided, but ETHDR and
+ * MIXER generate their own VSYNC due to hardware design, therefore
+ * MIXER has to sync with ETHDR by adjusting VSYNC length.
+ * On MT8195, there is no such setting so we use the gap between
+ * falling edge and rising edge of SOF (Start of Frame) signal to
+ * do the job, but since MT8188 a VSYNC_LEN setting is introduced to
+ * solve the problem and is given 0x40 (ticks) as the default value.
+ * Note that this value has to be set to 1 (minimum) if ETHDR is
+ * bypassed, otherwise MIXER could wait too long and cause underflow.
+ *
+ * Each MMSYS (multi-media system) may have different settings: it may use
+ * different clock sources, mux settings, reset controls, etc., and these
+ * differences are all stored here.
+ */
struct mtk_mmsys_driver_data {
const char *clk_driver;
const struct mtk_mmsys_routes *routes;
const unsigned int num_routes;
const u16 sw0_rst_offset;
+ const u8 *rst_tb;
const u32 num_resets;
const bool is_vppsys;
+ const u8 vsync_len;
};
/*
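As a concrete instance, the mt8365 DITHER0 -> DSI0 entry near the top of this series uses the macro like this; if MT8365_DITHER_MOUT_EN_DSI0 had a bit set outside MT8365_DISP_MS_IN_OUT_MASK, the __BUILD_BUG_ON_ZERO_MSG() check would turn it into a build error instead of a silently truncated mux value (standalone sketch, not code from the patch):

static const struct mtk_mmsys_routes example_routes[] = {
	MMSYS_ROUTE(DITHER0, DSI0,
		    MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
		    MT8365_DISP_MS_IN_OUT_MASK, MT8365_DITHER_MOUT_EN_DSI0),
};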
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 9d9f5ae578ac..38179e8cd98f 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -17,16 +17,35 @@
#define MT2701_MUTEX0_MOD0 0x2c
#define MT2701_MUTEX0_SOF0 0x30
+#define MT2701_MUTEX0_MOD1 0x34
+
#define MT8183_MUTEX0_MOD0 0x30
+#define MT8183_MUTEX0_MOD1 0x34
#define MT8183_MUTEX0_SOF0 0x2c
#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD1(mutex_mod_reg, n) ((mutex_mod_reg) + 0x20 * (n) + 0x4)
+/*
+ * Some SoCs may have multiple MUTEX_MOD registers because more than 32
+ * mods are present, hence requiring multiple 32-bit registers.
+ *
+ * The mutex_table_mod fully represents that by numbering the mods
+ * sequentially; each number is later used as a bit index, which can
+ * therefore exceed the 0..31 range.
+ *
+ * In order to retain compatibility with older SoCs, we perform R/W on
+ * single 32-bit registers, but this requires us to translate the
+ * mutex ID bit accordingly.
+ */
+#define DISP_REG_MUTEX_MOD(mutex, id, n) ({ \
+ const typeof(mutex) _mutex = (mutex); \
+ u32 _offset = (id) < 32 ? \
+ _mutex->data->mutex_mod_reg : \
+ _mutex->data->mutex_mod1_reg; \
+ _offset + 0x20 * (n); \
+})
#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
@@ -133,6 +152,31 @@
#define MT8188_MUTEX_MOD_DISP_POSTMASK0 24
#define MT8188_MUTEX_MOD2_DISP_PWM0 33
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA0 0
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA1 1
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA2 2
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA3 3
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA4 4
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA5 5
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA6 6
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA7 7
+#define MT8188_MUTEX_MOD_DISP1_PADDING0 8
+#define MT8188_MUTEX_MOD_DISP1_PADDING1 9
+#define MT8188_MUTEX_MOD_DISP1_PADDING2 10
+#define MT8188_MUTEX_MOD_DISP1_PADDING3 11
+#define MT8188_MUTEX_MOD_DISP1_PADDING4 12
+#define MT8188_MUTEX_MOD_DISP1_PADDING5 13
+#define MT8188_MUTEX_MOD_DISP1_PADDING6 14
+#define MT8188_MUTEX_MOD_DISP1_PADDING7 15
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE0 20
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE1 21
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE2 22
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24
+#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30
+#define MT8188_MUTEX_MOD_DISP1_DPI1 38
+#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
+
#define MT8195_MUTEX_MOD_DISP_OVL0 0
#define MT8195_MUTEX_MOD_DISP_WDMA0 1
#define MT8195_MUTEX_MOD_DISP_RDMA0 2
@@ -264,6 +308,8 @@
#define MT8183_MUTEX_SOF_DPI0 2
#define MT8188_MUTEX_SOF_DSI0 1
#define MT8188_MUTEX_SOF_DP_INTF0 3
+#define MT8188_MUTEX_SOF_DP_INTF1 4
+#define MT8188_MUTEX_SOF_DPI1 5
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@@ -275,6 +321,8 @@
#define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6)
#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
+#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7)
+#define MT8188_MUTEX_EOF_DPI1 (MT8188_MUTEX_SOF_DPI1 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@@ -301,11 +349,12 @@ enum mtk_mutex_sof_id {
};
struct mtk_mutex_data {
- const unsigned int *mutex_mod;
- const unsigned int *mutex_sof;
- const unsigned int mutex_mod_reg;
- const unsigned int mutex_sof_reg;
- const unsigned int *mutex_table_mod;
+ const u8 *mutex_mod;
+ const u8 *mutex_table_mod;
+ const u16 *mutex_sof;
+ const u16 mutex_mod_reg;
+ const u16 mutex_mod1_reg;
+ const u16 mutex_sof_reg;
const bool no_clk;
};
@@ -319,7 +368,7 @@ struct mtk_mutex_ctx {
struct cmdq_client_reg cmdq_reg;
};
-static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS,
[DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR,
[DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL,
@@ -328,7 +377,7 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
-static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1,
[DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0,
@@ -348,7 +397,7 @@ static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
};
-static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8167_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_CCORR] = MT8167_MUTEX_MOD_DISP_CCORR,
[DDP_COMPONENT_COLOR0] = MT8167_MUTEX_MOD_DISP_COLOR,
@@ -363,7 +412,7 @@ static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8167_MUTEX_MOD_DISP_WDMA0,
};
-static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
[DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
@@ -381,7 +430,7 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
-static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8183_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8183_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8183_MUTEX_MOD_DISP_COLOR0,
@@ -395,7 +444,7 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
};
-static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+static const u8 mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0,
[MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0,
[MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1,
@@ -406,7 +455,7 @@ static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0,
};
-static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8186_MUTEX_MOD_DISP_COLOR0,
@@ -419,7 +468,7 @@ static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1,
};
-static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+static const u8 mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_RDMA0] = MT8186_MUTEX_MOD_MDP_RDMA0,
[MUTEX_MOD_IDX_MDP_RSZ0] = MT8186_MUTEX_MOD_MDP_RSZ0,
[MUTEX_MOD_IDX_MDP_RSZ1] = MT8186_MUTEX_MOD_MDP_RSZ1,
@@ -430,7 +479,7 @@ static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0,
};
-static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OVL0] = MT8188_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_WDMA0] = MT8188_MUTEX_MOD_DISP_WDMA0,
[DDP_COMPONENT_RDMA0] = MT8188_MUTEX_MOD_DISP_RDMA0,
@@ -445,9 +494,67 @@ static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_DSI0] = MT8188_MUTEX_MOD_DISP_DSI0,
[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
+ [DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
+ [DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1,
+ [DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
+ [DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
+ [DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
+ [DDP_COMPONENT_MDP_RDMA2] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA2,
+ [DDP_COMPONENT_MDP_RDMA3] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA3,
+ [DDP_COMPONENT_MDP_RDMA4] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA4,
+ [DDP_COMPONENT_MDP_RDMA5] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA5,
+ [DDP_COMPONENT_MDP_RDMA6] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA6,
+ [DDP_COMPONENT_MDP_RDMA7] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA7,
+ [DDP_COMPONENT_PADDING0] = MT8188_MUTEX_MOD_DISP1_PADDING0,
+ [DDP_COMPONENT_PADDING1] = MT8188_MUTEX_MOD_DISP1_PADDING1,
+ [DDP_COMPONENT_PADDING2] = MT8188_MUTEX_MOD_DISP1_PADDING2,
+ [DDP_COMPONENT_PADDING3] = MT8188_MUTEX_MOD_DISP1_PADDING3,
+ [DDP_COMPONENT_PADDING4] = MT8188_MUTEX_MOD_DISP1_PADDING4,
+ [DDP_COMPONENT_PADDING5] = MT8188_MUTEX_MOD_DISP1_PADDING5,
+ [DDP_COMPONENT_PADDING6] = MT8188_MUTEX_MOD_DISP1_PADDING6,
+ [DDP_COMPONENT_PADDING7] = MT8188_MUTEX_MOD_DISP1_PADDING7,
+ [DDP_COMPONENT_MERGE1] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE0,
+ [DDP_COMPONENT_MERGE2] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE1,
+ [DDP_COMPONENT_MERGE3] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE2,
+ [DDP_COMPONENT_MERGE4] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE3,
+ [DDP_COMPONENT_MERGE5] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE4,
+};
+
+static const u8 mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2,
+ [MUTEX_MOD_IDX_MDP_RDMA3] = MT8195_MUTEX_MOD_MDP_RDMA3,
+ [MUTEX_MOD_IDX_MDP_FG0] = MT8195_MUTEX_MOD_MDP_FG0,
+ [MUTEX_MOD_IDX_MDP_FG2] = MT8195_MUTEX_MOD_MDP_FG2,
+ [MUTEX_MOD_IDX_MDP_FG3] = MT8195_MUTEX_MOD_MDP_FG3,
+ [MUTEX_MOD_IDX_MDP_HDR0] = MT8195_MUTEX_MOD_MDP_HDR0,
+ [MUTEX_MOD_IDX_MDP_HDR2] = MT8195_MUTEX_MOD_MDP_HDR2,
+ [MUTEX_MOD_IDX_MDP_HDR3] = MT8195_MUTEX_MOD_MDP_HDR3,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8195_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_AAL2] = MT8195_MUTEX_MOD_MDP_AAL2,
+ [MUTEX_MOD_IDX_MDP_AAL3] = MT8195_MUTEX_MOD_MDP_AAL3,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8195_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ2] = MT8195_MUTEX_MOD_MDP_RSZ2,
+ [MUTEX_MOD_IDX_MDP_RSZ3] = MT8195_MUTEX_MOD_MDP_RSZ3,
+ [MUTEX_MOD_IDX_MDP_MERGE2] = MT8195_MUTEX_MOD_MDP_MERGE2,
+ [MUTEX_MOD_IDX_MDP_MERGE3] = MT8195_MUTEX_MOD_MDP_MERGE3,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8195_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_TDSHP2] = MT8195_MUTEX_MOD_MDP_TDSHP2,
+ [MUTEX_MOD_IDX_MDP_TDSHP3] = MT8195_MUTEX_MOD_MDP_TDSHP3,
+ [MUTEX_MOD_IDX_MDP_COLOR0] = MT8195_MUTEX_MOD_MDP_COLOR0,
+ [MUTEX_MOD_IDX_MDP_COLOR2] = MT8195_MUTEX_MOD_MDP_COLOR2,
+ [MUTEX_MOD_IDX_MDP_COLOR3] = MT8195_MUTEX_MOD_MDP_COLOR3,
+ [MUTEX_MOD_IDX_MDP_OVL0] = MT8195_MUTEX_MOD_MDP_OVL0,
+ [MUTEX_MOD_IDX_MDP_PAD0] = MT8195_MUTEX_MOD_MDP_PAD0,
+ [MUTEX_MOD_IDX_MDP_PAD2] = MT8195_MUTEX_MOD_MDP_PAD2,
+ [MUTEX_MOD_IDX_MDP_PAD3] = MT8195_MUTEX_MOD_MDP_PAD3,
+ [MUTEX_MOD_IDX_MDP_TCC0] = MT8195_MUTEX_MOD_MDP_TCC0,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8195_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_WROT2] = MT8195_MUTEX_MOD_MDP_WROT2,
+ [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3,
};
-static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8192_MUTEX_MOD_DISP_COLOR0,
@@ -461,7 +568,7 @@ static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_RDMA4] = MT8192_MUTEX_MOD_DISP_RDMA4,
};
-static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OVL0] = MT8195_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_WDMA0] = MT8195_MUTEX_MOD_DISP_WDMA0,
[DDP_COMPONENT_RDMA0] = MT8195_MUTEX_MOD_DISP_RDMA0,
@@ -492,7 +599,7 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_DP_INTF1] = MT8195_MUTEX_MOD_DISP1_DP_INTF0,
};
-static const unsigned int mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+static const u8 mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0,
[MUTEX_MOD_IDX_MDP_RDMA1] = MT8195_MUTEX_MOD_MDP_RDMA1,
[MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2,
@@ -538,7 +645,7 @@ static const unsigned int mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3,
};
-static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR,
[DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0,
@@ -554,7 +661,7 @@ static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0,
};
-static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
@@ -564,14 +671,14 @@ static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
};
-static const unsigned int mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
[MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
};
-static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
@@ -579,13 +686,13 @@ static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
};
/* Add EOF setting so overlay hardware can receive frame done irq */
-static const unsigned int mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0 | MT8183_MUTEX_EOF_DSI0,
[MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0,
};
-static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+static const u16 mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MT8186_MUTEX_SOF_DSI0 | MT8186_MUTEX_EOF_DSI0,
[MUTEX_SOF_DPI0] = MT8186_MUTEX_SOF_DPI0 | MT8186_MUTEX_EOF_DPI0,
@@ -599,15 +706,19 @@ static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
* but also detect the error at end of frame(EAEOF) when EOF signal
* arrives.
*/
-static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] =
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI1] =
+ MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1,
[MUTEX_SOF_DP_INTF0] =
MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
+ [MUTEX_SOF_DP_INTF1] =
+ MT8188_MUTEX_SOF_DP_INTF1 | MT8188_MUTEX_EOF_DP_INTF1,
};
-static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MT8195_MUTEX_SOF_DSI0 | MT8195_MUTEX_EOF_DSI0,
[MUTEX_SOF_DSI1] = MT8195_MUTEX_SOF_DSI1 | MT8195_MUTEX_EOF_DSI1,
@@ -623,6 +734,7 @@ static const struct mtk_mutex_data mt2701_mutex_driver_data = {
.mutex_mod = mt2701_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT2701_MUTEX0_MOD1,
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
@@ -630,6 +742,7 @@ static const struct mtk_mutex_data mt2712_mutex_driver_data = {
.mutex_mod = mt2712_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT2701_MUTEX0_MOD1,
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
@@ -637,6 +750,7 @@ static const struct mtk_mutex_data mt6795_mutex_driver_data = {
.mutex_mod = mt8173_mutex_mod,
.mutex_sof = mt6795_mutex_sof,
.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT2701_MUTEX0_MOD1,
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
@@ -644,6 +758,7 @@ static const struct mtk_mutex_data mt8167_mutex_driver_data = {
.mutex_mod = mt8167_mutex_mod,
.mutex_sof = mt8167_mutex_sof,
.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT2701_MUTEX0_MOD1,
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
.no_clk = true,
};
@@ -652,6 +767,7 @@ static const struct mtk_mutex_data mt8173_mutex_driver_data = {
.mutex_mod = mt8173_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
.mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT2701_MUTEX0_MOD1,
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
@@ -659,6 +775,7 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.mutex_mod = mt8183_mutex_mod,
.mutex_sof = mt8183_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
.mutex_table_mod = mt8183_mutex_table_mod,
.no_clk = true,
@@ -666,6 +783,7 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
static const struct mtk_mutex_data mt8186_mdp_mutex_driver_data = {
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
.mutex_table_mod = mt8186_mdp_mutex_table_mod,
};
@@ -674,6 +792,7 @@ static const struct mtk_mutex_data mt8186_mutex_driver_data = {
.mutex_mod = mt8186_mutex_mod,
.mutex_sof = mt8186_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
@@ -681,13 +800,23 @@ static const struct mtk_mutex_data mt8188_mutex_driver_data = {
.mutex_mod = mt8188_mutex_mod,
.mutex_sof = mt8188_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt8188_vpp_mutex_driver_data = {
+ .mutex_sof = mt8188_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8188_mdp_mutex_table_mod,
+};
+
static const struct mtk_mutex_data mt8192_mutex_driver_data = {
.mutex_mod = mt8192_mutex_mod,
.mutex_sof = mt8183_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
@@ -695,12 +824,14 @@ static const struct mtk_mutex_data mt8195_mutex_driver_data = {
.mutex_mod = mt8195_mutex_mod,
.mutex_sof = mt8195_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
static const struct mtk_mutex_data mt8195_vpp_mutex_driver_data = {
.mutex_sof = mt8195_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
.mutex_table_mod = mt8195_mutex_table_mod,
};
@@ -709,6 +840,7 @@ static const struct mtk_mutex_data mt8365_mutex_driver_data = {
.mutex_mod = mt8365_mutex_mod,
.mutex_sof = mt8183_mutex_sof,
.mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_mod1_reg = MT8183_MUTEX0_MOD1,
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
.no_clk = true,
};
@@ -761,7 +893,7 @@ void mtk_mutex_add_comp(struct mtk_mutex *mutex,
struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
mutex[mutex->id]);
unsigned int reg;
- unsigned int sof_id;
+ unsigned int sof_id, mod_id;
unsigned int offset;
WARN_ON(&mtx->mutex[mutex->id] != mutex);
@@ -792,18 +924,11 @@ void mtk_mutex_add_comp(struct mtk_mutex *mutex,
sof_id = MUTEX_SOF_DP_INTF1;
break;
default:
- if (mtx->data->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
- mutex->id);
- reg = readl_relaxed(mtx->regs + offset);
- reg |= 1 << mtx->data->mutex_mod[id];
- writel_relaxed(reg, mtx->regs + offset);
- } else {
- offset = DISP_REG_MUTEX_MOD2(mutex->id);
- reg = readl_relaxed(mtx->regs + offset);
- reg |= 1 << (mtx->data->mutex_mod[id] - 32);
- writel_relaxed(reg, mtx->regs + offset);
- }
+ offset = DISP_REG_MUTEX_MOD(mtx, mtx->data->mutex_mod[id], mutex->id);
+ mod_id = mtx->data->mutex_mod[id] % 32;
+ reg = readl_relaxed(mtx->regs + offset);
+ reg |= BIT(mod_id);
+ writel_relaxed(reg, mtx->regs + offset);
return;
}
@@ -819,6 +944,7 @@ void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int mod_id;
unsigned int offset;
WARN_ON(&mtx->mutex[mutex->id] != mutex);
@@ -838,18 +964,11 @@ void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
mutex->id));
break;
default:
- if (mtx->data->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
- mutex->id);
- reg = readl_relaxed(mtx->regs + offset);
- reg &= ~(1 << mtx->data->mutex_mod[id]);
- writel_relaxed(reg, mtx->regs + offset);
- } else {
- offset = DISP_REG_MUTEX_MOD2(mutex->id);
- reg = readl_relaxed(mtx->regs + offset);
- reg &= ~(1 << (mtx->data->mutex_mod[id] - 32));
- writel_relaxed(reg, mtx->regs + offset);
- }
+ offset = DISP_REG_MUTEX_MOD(mtx, mtx->data->mutex_mod[id], mutex->id);
+ mod_id = mtx->data->mutex_mod[id] % 32;
+ reg = readl_relaxed(mtx->regs + offset);
+ reg &= ~BIT(mod_id);
+ writel_relaxed(reg, mtx->regs + offset);
break;
}
}
@@ -925,7 +1044,7 @@ int mtk_mutex_write_mod(struct mtk_mutex *mutex,
struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
mutex[mutex->id]);
unsigned int reg;
- u32 reg_offset, id_offset = 0;
+ u32 offset, mod_id;
WARN_ON(&mtx->mutex[mutex->id] != mutex);
@@ -935,34 +1054,16 @@ int mtk_mutex_write_mod(struct mtk_mutex *mutex,
return -EINVAL;
}
- /*
- * Some SoCs may have multiple MUTEX_MOD registers as more than 32 mods
- * are present, hence requiring multiple 32-bits registers.
- *
- * The mutex_table_mod fully represents that by defining the number of
- * the mod sequentially, later used as a bit number, which can be more
- * than 0..31.
- *
- * In order to retain compatibility with older SoCs, we perform R/W on
- * the single 32 bits registers, but this requires us to translate the
- * mutex ID bit accordingly.
- */
- if (mtx->data->mutex_table_mod[idx] < 32) {
- reg_offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
- mutex->id);
- } else {
- reg_offset = DISP_REG_MUTEX_MOD1(mtx->data->mutex_mod_reg,
- mutex->id);
- id_offset = 32;
- }
+ offset = DISP_REG_MUTEX_MOD(mtx, mtx->data->mutex_table_mod[idx], mutex->id);
+ mod_id = mtx->data->mutex_table_mod[idx] % 32;
- reg = readl_relaxed(mtx->regs + reg_offset);
+ reg = readl_relaxed(mtx->regs + offset);
if (clear)
- reg &= ~BIT(mtx->data->mutex_table_mod[idx] - id_offset);
+ reg &= ~BIT(mod_id);
else
- reg |= BIT(mtx->data->mutex_table_mod[idx] - id_offset);
+ reg |= BIT(mod_id);
- writel_relaxed(reg, mtx->regs + reg_offset);
+ writel_relaxed(reg, mtx->regs + offset);
return 0;
}
@@ -1038,6 +1139,7 @@ static const struct of_device_id mutex_driver_dt_match[] = {
{ .compatible = "mediatek,mt8186-disp-mutex", .data = &mt8186_mutex_driver_data },
{ .compatible = "mediatek,mt8186-mdp3-mutex", .data = &mt8186_mdp_mutex_driver_data },
{ .compatible = "mediatek,mt8188-disp-mutex", .data = &mt8188_mutex_driver_data },
+ { .compatible = "mediatek,mt8188-vpp-mutex", .data = &mt8188_vpp_mutex_driver_data },
{ .compatible = "mediatek,mt8192-disp-mutex", .data = &mt8192_mutex_driver_data },
{ .compatible = "mediatek,mt8195-disp-mutex", .data = &mt8195_mutex_driver_data },
{ .compatible = "mediatek,mt8195-vpp-mutex", .data = &mt8195_vpp_mutex_driver_data },
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index efd9cae212dc..0bcd85826375 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -483,7 +483,7 @@ enum pwrap_regs {
PWRAP_MSB_FIRST,
};
-static int mt2701_regs[] = {
+static const int mt2701_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -569,7 +569,7 @@ static int mt2701_regs[] = {
[PWRAP_ADC_RDATA_ADDR2] = 0x154,
};
-static int mt6765_regs[] = {
+static const int mt6765_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -601,7 +601,7 @@ static int mt6765_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x1E0,
};
-static int mt6779_regs[] = {
+static const int mt6779_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -640,7 +640,7 @@ static int mt6779_regs[] = {
[PWRAP_WACS2_VLDCLR] = 0xC28,
};
-static int mt6795_regs[] = {
+static const int mt6795_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -725,7 +725,7 @@ static int mt6795_regs[] = {
[PWRAP_EXT_CK] = 0x14c,
};
-static int mt6797_regs[] = {
+static const int mt6797_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -758,7 +758,7 @@ static int mt6797_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x1D4,
};
-static int mt6873_regs[] = {
+static const int mt6873_regs[] = {
[PWRAP_INIT_DONE2] = 0x0,
[PWRAP_TIMER_EN] = 0x3E0,
[PWRAP_INT_EN] = 0x448,
@@ -769,7 +769,7 @@ static int mt6873_regs[] = {
[PWRAP_WACS2_RDATA] = 0xCA8,
};
-static int mt7622_regs[] = {
+static const int mt7622_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -881,7 +881,7 @@ static int mt7622_regs[] = {
[PWRAP_SPI2_CTRL] = 0x244,
};
-static int mt8135_regs[] = {
+static const int mt8135_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -954,7 +954,7 @@ static int mt8135_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x160,
};
-static int mt8173_regs[] = {
+static const int mt8173_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1036,7 +1036,7 @@ static int mt8173_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x148,
};
-static int mt8183_regs[] = {
+static const int mt8183_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1087,7 +1087,7 @@ static int mt8183_regs[] = {
[PWRAP_WACS2_VLDCLR] = 0xC28,
};
-static int mt8195_regs[] = {
+static const int mt8195_regs[] = {
[PWRAP_INIT_DONE2] = 0x0,
[PWRAP_STAUPD_CTRL] = 0x4C,
[PWRAP_TIMER_EN] = 0x3E4,
@@ -1104,7 +1104,7 @@ static int mt8195_regs[] = {
[PWRAP_WACS2_RDATA] = 0x8A8,
};
-static int mt8365_regs[] = {
+static const int mt8365_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1166,7 +1166,7 @@ static int mt8365_regs[] = {
[PWRAP_WDT_SRC_EN_1] = 0xf8,
};
-static int mt8516_regs[] = {
+static const int mt8516_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1251,7 +1251,7 @@ static int mt8516_regs[] = {
[PWRAP_MSB_FIRST] = 0x170,
};
-static int mt8186_regs[] = {
+static const int mt8186_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1366,10 +1366,6 @@ struct pmic_wrapper {
struct regmap *regmap;
const struct pmic_wrapper_type *master;
const struct pwrap_slv_type *slave;
- struct clk *clk_spi;
- struct clk *clk_wrap;
- struct clk *clk_sys;
- struct clk *clk_tmr;
struct reset_control *rstc;
struct reset_control *rstc_bridge;
@@ -1377,7 +1373,7 @@ struct pmic_wrapper {
};
struct pmic_wrapper_type {
- int *regs;
+ const int *regs;
enum pwrap_type type;
u32 arb_en_all;
u32 int_en_all;
@@ -2397,7 +2393,7 @@ static const struct pmic_wrapper_type pwrap_mt8183 = {
.init_soc_specific = pwrap_mt8183_init_soc_specific,
};
-static struct pmic_wrapper_type pwrap_mt8195 = {
+static const struct pmic_wrapper_type pwrap_mt8195 = {
.regs = mt8195_regs,
.type = PWRAP_MT8195,
.arb_en_all = 0x777f, /* NEED CONFIRM */
@@ -2423,7 +2419,7 @@ static const struct pmic_wrapper_type pwrap_mt8365 = {
.init_soc_specific = NULL,
};
-static struct pmic_wrapper_type pwrap_mt8516 = {
+static const struct pmic_wrapper_type pwrap_mt8516 = {
.regs = mt8516_regs,
.type = PWRAP_MT8516,
.arb_en_all = 0xff,
@@ -2435,7 +2431,7 @@ static struct pmic_wrapper_type pwrap_mt8516 = {
.init_soc_specific = NULL,
};
-static struct pmic_wrapper_type pwrap_mt8186 = {
+static const struct pmic_wrapper_type pwrap_mt8186 = {
.regs = mt8186_regs,
.type = PWRAP_MT8186,
.arb_en_all = 0xfb27f,
@@ -2472,6 +2468,7 @@ static int pwrap_probe(struct platform_device *pdev)
int ret, irq;
u32 mask_done;
struct pmic_wrapper *wrp;
+ struct clk_bulk_data *clk;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_slave_id = NULL;
@@ -2521,49 +2518,10 @@ static int pwrap_probe(struct platform_device *pdev)
}
}
- wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
- if (IS_ERR(wrp->clk_spi)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n",
- PTR_ERR(wrp->clk_spi));
- return PTR_ERR(wrp->clk_spi);
- }
-
- wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
- if (IS_ERR(wrp->clk_wrap)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n",
- PTR_ERR(wrp->clk_wrap));
- return PTR_ERR(wrp->clk_wrap);
- }
-
- wrp->clk_sys = devm_clk_get_optional(wrp->dev, "sys");
- if (IS_ERR(wrp->clk_sys)) {
- return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_sys),
- "failed to get clock: %pe\n",
- wrp->clk_sys);
- }
-
- wrp->clk_tmr = devm_clk_get_optional(wrp->dev, "tmr");
- if (IS_ERR(wrp->clk_tmr)) {
- return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_tmr),
- "failed to get clock: %pe\n",
- wrp->clk_tmr);
- }
-
- ret = clk_prepare_enable(wrp->clk_spi);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(wrp->clk_wrap);
- if (ret)
- goto err_out1;
-
- ret = clk_prepare_enable(wrp->clk_sys);
- if (ret)
- goto err_out2;
-
- ret = clk_prepare_enable(wrp->clk_tmr);
- if (ret)
- goto err_out3;
+ ret = devm_clk_bulk_get_all_enabled(wrp->dev, &clk);
+ if (ret < 0)
+ return dev_err_probe(wrp->dev, ret,
+ "failed to get clocks\n");
/* Enable internal dynamic clock */
if (HAS_CAP(wrp->master->caps, PWRAP_CAP_DCM)) {
@@ -2579,7 +2537,7 @@ static int pwrap_probe(struct platform_device *pdev)
ret = pwrap_init(wrp);
if (ret) {
dev_dbg(wrp->dev, "init failed with %d\n", ret);
- goto err_out4;
+ return ret;
}
}
@@ -2592,8 +2550,7 @@ static int pwrap_probe(struct platform_device *pdev)
if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & mask_done)) {
dev_dbg(wrp->dev, "initialization isn't finished\n");
- ret = -ENODEV;
- goto err_out4;
+ return -ENODEV;
}
/* Initialize watchdog, may not be done by the bootloader */
@@ -2622,42 +2579,27 @@ static int pwrap_probe(struct platform_device *pdev)
pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_out2;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
IRQF_TRIGGER_HIGH,
"mt-pmic-pwrap", wrp);
if (ret)
- goto err_out4;
+ return ret;
wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap);
- if (IS_ERR(wrp->regmap)) {
- ret = PTR_ERR(wrp->regmap);
- goto err_out2;
- }
+ if (IS_ERR(wrp->regmap))
+ return PTR_ERR(wrp->regmap);
ret = of_platform_populate(np, NULL, NULL, wrp->dev);
if (ret) {
dev_dbg(wrp->dev, "failed to create child devices at %pOF\n",
np);
- goto err_out4;
+ return ret;
}
return 0;
-
-err_out4:
- clk_disable_unprepare(wrp->clk_tmr);
-err_out3:
- clk_disable_unprepare(wrp->clk_sys);
-err_out2:
- clk_disable_unprepare(wrp->clk_wrap);
-err_out1:
- clk_disable_unprepare(wrp->clk_spi);
-
- return ret;
}
static struct platform_driver pwrap_drv = {
diff --git a/drivers/soc/mediatek/mtk-regulator-coupler.c b/drivers/soc/mediatek/mtk-regulator-coupler.c
index ad2ed42aa697..0b6a2884145e 100644
--- a/drivers/soc/mediatek/mtk-regulator-coupler.c
+++ b/drivers/soc/mediatek/mtk-regulator-coupler.c
@@ -147,6 +147,7 @@ static int mediatek_regulator_coupler_init(void)
{
if (!of_machine_is_compatible("mediatek,mt8183") &&
!of_machine_is_compatible("mediatek,mt8186") &&
+ !of_machine_is_compatible("mediatek,mt8188") &&
!of_machine_is_compatible("mediatek,mt8192"))
return 0;
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
new file mode 100644
index 000000000000..978c43e9115a
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#define MTK_SOCINFO_ENTRY(_soc_name, _segment_name, _marketing_name, _cell_data1, _cell_data2) {\
+ .soc_name = _soc_name, \
+ .segment_name = _segment_name, \
+ .marketing_name = _marketing_name, \
+ .cell_data = {_cell_data1, _cell_data2} \
+}
+#define CELL_NOT_USED (0xFFFFFFFF)
+#define MAX_CELLS (2)
+
+struct mtk_socinfo {
+ struct device *dev;
+ struct name_data *name_data;
+ struct socinfo_data *socinfo_data;
+ struct soc_device *soc_dev;
+};
+
+struct socinfo_data {
+ char *soc_name;
+ char *segment_name;
+ char *marketing_name;
+ u32 cell_data[MAX_CELLS];
+};
+
+static const char *cell_names[MAX_CELLS] = {"socinfo-data1", "socinfo-data2"};
+
+static struct socinfo_data socinfo_data_table[] = {
+ MTK_SOCINFO_ENTRY("MT8173", "MT8173V/AC", "MT8173", 0x6CA20004, 0x10000000),
+ MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000840),
+ MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000940),
+ MTK_SOCINFO_ENTRY("MT8186", "MT8186GV/AZA", "Kompanio 520", 0x81861001, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8186T", "MT8186TV/AZA", "Kompanio 528", 0x81862001, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 838", 0x81880000, 0x00000010),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 838", 0x81880000, 0x00000011),
+ MTK_SOCINFO_ENTRY("MT8189", "MT8189GV/AZA", "Kompanio 540", 0x81890000, 0x00000020),
+ MTK_SOCINFO_ENTRY("MT8189", "MT8189HV/AZA", "Kompanio 540", 0x81890000, 0x00000021),
+ MTK_SOCINFO_ENTRY("MT8192", "MT8192V/AZA", "Kompanio 820", 0x00001100, 0x00040080),
+ MTK_SOCINFO_ENTRY("MT8192T", "MT8192V/ATZA", "Kompanio 828", 0x00000100, 0x000400C0),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EZA", "Kompanio 1200", 0x81950300, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081),
+ MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080),
+ MTK_SOCINFO_ENTRY("MT8391", "MT8391AV/AZA", "Genio 720", 0x83910000, 0x00000080),
+ MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950800, CELL_NOT_USED),
+};
+
+static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
+{
+ struct soc_device_attribute *attrs;
+ struct socinfo_data *data = mtk_socinfop->socinfo_data;
+ static const char *soc_manufacturer = "MediaTek";
+
+ attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ if (data->marketing_name != NULL && data->marketing_name[0] != '\0')
+ attrs->family = devm_kasprintf(mtk_socinfop->dev, GFP_KERNEL, "MediaTek %s",
+ data->marketing_name);
+ else
+ attrs->family = soc_manufacturer;
+
+ attrs->soc_id = data->soc_name;
+ /*
+ * The "machine" field will be populated automatically with the model
+ * name from board DTS (if available).
+ **/
+
+ mtk_socinfop->soc_dev = soc_device_register(attrs);
+ if (IS_ERR(mtk_socinfop->soc_dev))
+ return PTR_ERR(mtk_socinfop->soc_dev);
+
+ dev_info(mtk_socinfop->dev, "%s (%s) SoC detected.\n", attrs->family, attrs->soc_id);
+ return 0;
+}
+
+static u32 mtk_socinfo_read_cell(struct device *dev, const char *name)
+{
+ struct nvmem_device *nvmemp;
+ struct device_node *np, *nvmem_node = dev->parent->of_node;
+ u32 offset;
+ u32 cell_val = CELL_NOT_USED;
+
+ /* should never fail since the nvmem driver registers this child */
+ nvmemp = nvmem_device_find(nvmem_node, device_match_of_node);
+ if (IS_ERR(nvmemp))
+ goto out;
+
+ np = of_get_child_by_name(nvmem_node, name);
+ if (!np)
+ goto put_device;
+
+ if (of_property_read_u32_index(np, "reg", 0, &offset))
+ goto put_node;
+
+ nvmem_device_read(nvmemp, offset, sizeof(cell_val), &cell_val);
+
+put_node:
+ of_node_put(np);
+put_device:
+ nvmem_device_put(nvmemp);
+out:
+ return cell_val;
+}
+
+static int mtk_socinfo_get_socinfo_data(struct mtk_socinfo *mtk_socinfop)
+{
+ unsigned int i, j;
+ unsigned int num_cell_data = 0;
+ u32 cell_data[MAX_CELLS] = {0};
+ bool match_socinfo;
+ int match_socinfo_index = -1;
+
+ for (i = 0; i < MAX_CELLS; i++) {
+ cell_data[i] = mtk_socinfo_read_cell(mtk_socinfop->dev, cell_names[i]);
+ if (cell_data[i] != CELL_NOT_USED)
+ num_cell_data++;
+ else
+ break;
+ }
+
+ if (!num_cell_data)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(socinfo_data_table); i++) {
+ match_socinfo = true;
+ for (j = 0; j < num_cell_data; j++) {
+ if (cell_data[j] != socinfo_data_table[i].cell_data[j]) {
+ match_socinfo = false;
+ break;
+ }
+ }
+ if (match_socinfo) {
+ mtk_socinfop->socinfo_data = &(socinfo_data_table[i]);
+ match_socinfo_index = i;
+ break;
+ }
+ }
+
+ if (match_socinfo_index < 0) {
+ dev_warn(mtk_socinfop->dev,
+ "Unknown MediaTek SoC with ID 0x%08x 0x%08x\n",
+ cell_data[0], cell_data[1]);
+ return -ENOENT;
+ }
+
+ return match_socinfo_index;
+}
+
+static int mtk_socinfo_probe(struct platform_device *pdev)
+{
+ struct mtk_socinfo *mtk_socinfop;
+ int ret;
+
+ mtk_socinfop = devm_kzalloc(&pdev->dev, sizeof(*mtk_socinfop), GFP_KERNEL);
+ if (!mtk_socinfop)
+ return -ENOMEM;
+
+ mtk_socinfop->dev = &pdev->dev;
+
+ ret = mtk_socinfo_get_socinfo_data(mtk_socinfop);
+ if (ret < 0)
+ return dev_err_probe(mtk_socinfop->dev, ret, "Failed to get socinfo data\n");
+
+ ret = mtk_socinfo_create_socinfo_node(mtk_socinfop);
+ if (ret)
+ return dev_err_probe(mtk_socinfop->dev, ret, "Cannot create node\n");
+
+ platform_set_drvdata(pdev, mtk_socinfop);
+ return 0;
+}
+
+static void mtk_socinfo_remove(struct platform_device *pdev)
+{
+ struct mtk_socinfo *mtk_socinfop = platform_get_drvdata(pdev);
+
+ soc_device_unregister(mtk_socinfop->soc_dev);
+}
+
+static struct platform_driver mtk_socinfo = {
+ .probe = mtk_socinfo_probe,
+ .remove = mtk_socinfo_remove,
+ .driver = {
+ .name = "mtk-socinfo",
+ },
+};
+module_platform_driver(mtk_socinfo);
+
+MODULE_AUTHOR("William-TW LIN <william-tw.lin@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek socinfo driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index 3a2f97cd5272..f45537546553 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 MediaTek Inc.
+ * Copyright (C) 2022 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
*/
#include <linux/bitfield.h>
@@ -32,16 +34,6 @@
#include <linux/spinlock.h>
#include <linux/thermal.h>
-/* svs bank 1-line software id */
-#define SVSB_CPU_LITTLE BIT(0)
-#define SVSB_CPU_BIG BIT(1)
-#define SVSB_CCI BIT(2)
-#define SVSB_GPU BIT(3)
-
-/* svs bank 2-line type */
-#define SVSB_LOW BIT(8)
-#define SVSB_HIGH BIT(9)
-
/* svs bank mode support */
#define SVSB_MODE_ALL_DISABLE 0
#define SVSB_MODE_INIT01 BIT(1)
@@ -128,6 +120,13 @@
#define SVSB_VOPS_FLD_VOP2_6 GENMASK(23, 16)
#define SVSB_VOPS_FLD_VOP3_7 GENMASK(31, 24)
+/* SVS Thermal Coefficients */
+#define SVSB_TS_COEFF_MT8195 250460
+#define SVSB_TS_COEFF_MT8186 204650
+
+/* Algo helpers */
+#define FUSE_DATA_NOT_VALID U32_MAX
+
/* svs bank related setting */
#define BITS8 8
#define MAX_OPP_ENTRIES 16
@@ -175,6 +174,36 @@ static DEFINE_SPINLOCK(svs_lock);
#endif
/**
+ * enum svsb_sw_id - SVS Bank Software ID
+ * @SVSB_SWID_CPU_LITTLE: CPU little cluster Bank
+ * @SVSB_SWID_CPU_BIG: CPU big cluster Bank
+ * @SVSB_SWID_CCI: Cache Coherent Interconnect Bank
+ * @SVSB_SWID_GPU: GPU Bank
+ * @SVSB_SWID_MAX: Total number of Banks
+ */
+enum svsb_sw_id {
+ SVSB_SWID_CPU_LITTLE,
+ SVSB_SWID_CPU_BIG,
+ SVSB_SWID_CCI,
+ SVSB_SWID_GPU,
+ SVSB_SWID_MAX
+};
+
+/**
+ * enum svsb_type - SVS Bank 2-line: Type and Role
+ * @SVSB_TYPE_NONE: One-line type Bank - Global role
+ * @SVSB_TYPE_LOW: Two-line type Bank - Low bank role
+ * @SVSB_TYPE_HIGH: Two-line type Bank - High bank role
+ * @SVSB_TYPE_MAX: Total number of bank types
+ */
+enum svsb_type {
+ SVSB_TYPE_NONE,
+ SVSB_TYPE_LOW,
+ SVSB_TYPE_HIGH,
+ SVSB_TYPE_MAX
+};
+
+/**
* enum svsb_phase - svs bank phase enumeration
* @SVSB_PHASE_ERROR: svs bank encounters unexpected condition
* @SVSB_PHASE_INIT01: svs bank basic init for data calibration
@@ -256,60 +285,88 @@ enum svs_reg_index {
};
static const u32 svs_regs_v2[] = {
- [DESCHAR] = 0xc00,
- [TEMPCHAR] = 0xc04,
- [DETCHAR] = 0xc08,
- [AGECHAR] = 0xc0c,
- [DCCONFIG] = 0xc10,
- [AGECONFIG] = 0xc14,
- [FREQPCT30] = 0xc18,
- [FREQPCT74] = 0xc1c,
- [LIMITVALS] = 0xc20,
- [VBOOT] = 0xc24,
- [DETWINDOW] = 0xc28,
- [CONFIG] = 0xc2c,
- [TSCALCS] = 0xc30,
- [RUNCONFIG] = 0xc34,
- [SVSEN] = 0xc38,
- [INIT2VALS] = 0xc3c,
- [DCVALUES] = 0xc40,
- [AGEVALUES] = 0xc44,
- [VOP30] = 0xc48,
- [VOP74] = 0xc4c,
- [TEMP] = 0xc50,
- [INTSTS] = 0xc54,
- [INTSTSRAW] = 0xc58,
- [INTEN] = 0xc5c,
- [CHKINT] = 0xc60,
- [CHKSHIFT] = 0xc64,
- [STATUS] = 0xc68,
- [VDESIGN30] = 0xc6c,
- [VDESIGN74] = 0xc70,
- [DVT30] = 0xc74,
- [DVT74] = 0xc78,
- [AGECOUNT] = 0xc7c,
- [SMSTATE0] = 0xc80,
- [SMSTATE1] = 0xc84,
- [CTL0] = 0xc88,
- [DESDETSEC] = 0xce0,
- [TEMPAGESEC] = 0xce4,
- [CTRLSPARE0] = 0xcf0,
- [CTRLSPARE1] = 0xcf4,
- [CTRLSPARE2] = 0xcf8,
- [CTRLSPARE3] = 0xcfc,
- [CORESEL] = 0xf00,
- [THERMINTST] = 0xf04,
- [INTST] = 0xf08,
- [THSTAGE0ST] = 0xf0c,
- [THSTAGE1ST] = 0xf10,
- [THSTAGE2ST] = 0xf14,
- [THAHBST0] = 0xf18,
- [THAHBST1] = 0xf1c,
- [SPARE0] = 0xf20,
- [SPARE1] = 0xf24,
- [SPARE2] = 0xf28,
- [SPARE3] = 0xf2c,
- [THSLPEVEB] = 0xf30,
+ [DESCHAR] = 0x00,
+ [TEMPCHAR] = 0x04,
+ [DETCHAR] = 0x08,
+ [AGECHAR] = 0x0c,
+ [DCCONFIG] = 0x10,
+ [AGECONFIG] = 0x14,
+ [FREQPCT30] = 0x18,
+ [FREQPCT74] = 0x1c,
+ [LIMITVALS] = 0x20,
+ [VBOOT] = 0x24,
+ [DETWINDOW] = 0x28,
+ [CONFIG] = 0x2c,
+ [TSCALCS] = 0x30,
+ [RUNCONFIG] = 0x34,
+ [SVSEN] = 0x38,
+ [INIT2VALS] = 0x3c,
+ [DCVALUES] = 0x40,
+ [AGEVALUES] = 0x44,
+ [VOP30] = 0x48,
+ [VOP74] = 0x4c,
+ [TEMP] = 0x50,
+ [INTSTS] = 0x54,
+ [INTSTSRAW] = 0x58,
+ [INTEN] = 0x5c,
+ [CHKINT] = 0x60,
+ [CHKSHIFT] = 0x64,
+ [STATUS] = 0x68,
+ [VDESIGN30] = 0x6c,
+ [VDESIGN74] = 0x70,
+ [DVT30] = 0x74,
+ [DVT74] = 0x78,
+ [AGECOUNT] = 0x7c,
+ [SMSTATE0] = 0x80,
+ [SMSTATE1] = 0x84,
+ [CTL0] = 0x88,
+ [DESDETSEC] = 0xe0,
+ [TEMPAGESEC] = 0xe4,
+ [CTRLSPARE0] = 0xf0,
+ [CTRLSPARE1] = 0xf4,
+ [CTRLSPARE2] = 0xf8,
+ [CTRLSPARE3] = 0xfc,
+ [CORESEL] = 0x300,
+ [THERMINTST] = 0x304,
+ [INTST] = 0x308,
+ [THSTAGE0ST] = 0x30c,
+ [THSTAGE1ST] = 0x310,
+ [THSTAGE2ST] = 0x314,
+ [THAHBST0] = 0x318,
+ [THAHBST1] = 0x31c,
+ [SPARE0] = 0x320,
+ [SPARE1] = 0x324,
+ [SPARE2] = 0x328,
+ [SPARE3] = 0x32c,
+ [THSLPEVEB] = 0x330,
+};
+
+static const char * const svs_swid_names[SVSB_SWID_MAX] = {
+ "SVSB_CPU_LITTLE", "SVSB_CPU_BIG", "SVSB_CCI", "SVSB_GPU"
+};
+
+static const char * const svs_type_names[SVSB_TYPE_MAX] = {
+ "", "_LOW", "_HIGH"
+};
+
+enum svs_fusemap_dev {
+ BDEV_BDES,
+ BDEV_MDES,
+ BDEV_MTDES,
+ BDEV_DCBDET,
+ BDEV_DCMDET,
+ BDEV_MAX
+};
+
+enum svs_fusemap_glb {
+ GLB_FT_PGM,
+ GLB_VMIN,
+ GLB_MAX
+};
+
+struct svs_fusemap {
+ s8 index;
+ u8 ofst;
};
/**
@@ -317,88 +374,124 @@ static const u32 svs_regs_v2[] = {
* @base: svs platform register base
* @dev: svs platform device
* @main_clk: main clock for svs bank
- * @pbank: svs bank pointer needing to be protected by spin_lock section
* @banks: svs banks that svs platform supports
* @rst: svs platform reset control
* @efuse_max: total number of svs efuse
* @tefuse_max: total number of thermal efuse
* @regs: svs platform registers map
- * @bank_max: total number of svs banks
* @efuse: svs efuse data received from NVMEM framework
* @tefuse: thermal efuse data received from NVMEM framework
+ * @ts_coeff: thermal sensors coefficient
+ * @bank_max: total number of svs banks
*/
struct svs_platform {
void __iomem *base;
struct device *dev;
struct clk *main_clk;
- struct svs_bank *pbank;
struct svs_bank *banks;
struct reset_control *rst;
size_t efuse_max;
size_t tefuse_max;
const u32 *regs;
- u32 bank_max;
u32 *efuse;
u32 *tefuse;
+ u32 ts_coeff;
+ u16 bank_max;
};
struct svs_platform_data {
char *name;
struct svs_bank *banks;
- bool (*efuse_parsing)(struct svs_platform *svsp);
+ bool (*efuse_parsing)(struct svs_platform *svsp, const struct svs_platform_data *pdata);
int (*probe)(struct svs_platform *svsp);
+ const struct svs_fusemap *glb_fuse_map;
const u32 *regs;
- u32 bank_max;
+ u32 ts_coeff;
+ u16 bank_max;
+};
+
+/**
+ * struct svs_bank_pdata - SVS Bank immutable config parameters
+ * @dev_fuse_map: Bank fuse map data
+ * @buck_name: Regulator name
+ * @tzone_name: Thermal zone name
+ * @age_config: Bank age configuration
+ * @ctl0: TS-x selection
+ * @dc_config: Bank dc configuration
+ * @int_st: Bank interrupt identification
+ * @turn_freq_base: Reference frequency for 2-line turn point
+ * @tzone_htemp: Thermal zone high temperature threshold
+ * @tzone_ltemp: Thermal zone low temperature threshold
+ * @volt_step: Bank voltage step
+ * @volt_base: Bank voltage base
+ * @tzone_htemp_voffset: Thermal zone high temperature voltage offset
+ * @tzone_ltemp_voffset: Thermal zone low temperature voltage offset
+ * @chk_shift: Bank chicken shift
+ * @cpu_id: CPU core ID for SVS CPU bank use only
+ * @opp_count: Bank opp count
+ * @vboot: Voltage request for bank init01 only
+ * @vco: Bank VCO value
+ * @sw_id: Bank software identification
+ * @type: SVS Bank Type (1 or 2-line) and Role (high/low)
+ * @set_freq_pct: function pointer to set bank frequency percent table
+ * @get_volts: function pointer to get bank voltages
+ */
+struct svs_bank_pdata {
+ const struct svs_fusemap *dev_fuse_map;
+ char *buck_name;
+ char *tzone_name;
+ u32 age_config;
+ u32 ctl0;
+ u32 dc_config;
+ u32 int_st;
+ u32 turn_freq_base;
+ u32 tzone_htemp;
+ u32 tzone_ltemp;
+ u32 volt_step;
+ u32 volt_base;
+ u16 tzone_htemp_voffset;
+ u16 tzone_ltemp_voffset;
+ u8 chk_shift;
+ u8 cpu_id;
+ u8 opp_count;
+ u8 vboot;
+ u8 vco;
+ u8 sw_id;
+ u8 type;
+
+ /* Callbacks */
+ void (*set_freq_pct)(struct svs_platform *svsp, struct svs_bank *svsb);
+ void (*get_volts)(struct svs_platform *svsp, struct svs_bank *svsb);
};
/**
* struct svs_bank - svs bank representation
+ * @pdata: SVS Bank immutable config parameters
* @dev: bank device
* @opp_dev: device for opp table/buck control
* @init_completion: the timeout completion for bank init
* @buck: regulator used by opp_dev
* @tzd: thermal zone device for getting temperature
* @lock: mutex lock to protect voltage update process
- * @set_freq_pct: function pointer to set bank frequency percent table
- * @get_volts: function pointer to get bank voltages
* @name: bank name
- * @buck_name: regulator name
- * @tzone_name: thermal zone name
* @phase: bank current phase
* @volt_od: bank voltage overdrive
* @reg_data: bank register data in different phase for debug purpose
* @pm_runtime_enabled_count: bank pm runtime enabled count
- * @mode_support: bank mode support.
+ * @mode_support: bank mode support
* @freq_base: reference frequency for bank init
- * @turn_freq_base: refenrece frequency for 2-line turn point
- * @vboot: voltage request for bank init01 only
* @opp_dfreq: default opp frequency table
* @opp_dvolt: default opp voltage table
* @freq_pct: frequency percent table for bank init
* @volt: bank voltage table
- * @volt_step: bank voltage step
- * @volt_base: bank voltage base
* @volt_flags: bank voltage flags
* @vmax: bank voltage maximum
* @vmin: bank voltage minimum
- * @age_config: bank age configuration
* @age_voffset_in: bank age voltage offset
- * @dc_config: bank dc configuration
* @dc_voffset_in: bank dc voltage offset
* @dvt_fixed: bank dvt fixed value
- * @vco: bank VCO value
- * @chk_shift: bank chicken shift
* @core_sel: bank selection
- * @opp_count: bank opp count
- * @int_st: bank interrupt identification
- * @sw_id: bank software identification
- * @cpu_id: cpu core id for SVS CPU bank use only
- * @ctl0: TS-x selection
* @temp: bank temperature
- * @tzone_htemp: thermal zone high temperature threshold
- * @tzone_htemp_voffset: thermal zone high temperature voltage offset
- * @tzone_ltemp: thermal zone low temperature threshold
- * @tzone_ltemp_voffset: thermal zone low temperature voltage offset
* @bts: svs efuse data
* @mts: svs efuse data
* @bdes: svs efuse data
@@ -407,69 +500,49 @@ struct svs_platform_data {
* @dcbdet: svs efuse data
* @dcmdet: svs efuse data
* @turn_pt: 2-line turn point tells which opp_volt calculated by high/low bank
- * @type: bank type to represent it is 2-line (high/low) bank or 1-line bank
+ * @vbin_turn_pt: voltage bin turn point helps know which svsb_volt should be overridden
*
- * Svs bank will generate suitalbe voltages by below general math equation
+ * Svs bank will generate suitable voltages by below general math equation
* and provide these voltages to opp voltage table.
*
* opp_volt[i] = (volt[i] * volt_step) + volt_base;
*/
struct svs_bank {
+ const struct svs_bank_pdata pdata;
struct device *dev;
struct device *opp_dev;
struct completion init_completion;
struct regulator *buck;
struct thermal_zone_device *tzd;
- struct mutex lock; /* lock to protect voltage update process */
- void (*set_freq_pct)(struct svs_platform *svsp);
- void (*get_volts)(struct svs_platform *svsp);
+ struct mutex lock;
+ int pm_runtime_enabled_count;
+ short int volt_od;
char *name;
- char *buck_name;
- char *tzone_name;
enum svsb_phase phase;
- s32 volt_od;
u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX];
- u32 pm_runtime_enabled_count;
- u32 mode_support;
- u32 freq_base;
- u32 turn_freq_base;
- u32 vboot;
+ u8 mode_support;
u32 opp_dfreq[MAX_OPP_ENTRIES];
u32 opp_dvolt[MAX_OPP_ENTRIES];
u32 freq_pct[MAX_OPP_ENTRIES];
u32 volt[MAX_OPP_ENTRIES];
- u32 volt_step;
- u32 volt_base;
u32 volt_flags;
- u32 vmax;
- u32 vmin;
- u32 age_config;
- u32 age_voffset_in;
- u32 dc_config;
- u32 dc_voffset_in;
- u32 dvt_fixed;
- u32 vco;
- u32 chk_shift;
+ u32 freq_base;
+ u32 turn_pt;
+ u32 vbin_turn_pt;
u32 core_sel;
- u32 opp_count;
- u32 int_st;
- u32 sw_id;
- u32 cpu_id;
- u32 ctl0;
u32 temp;
- u32 tzone_htemp;
- u32 tzone_htemp_voffset;
- u32 tzone_ltemp;
- u32 tzone_ltemp_voffset;
- u32 bts;
- u32 mts;
- u32 bdes;
- u32 mdes;
- u32 mtdes;
- u32 dcbdet;
- u32 dcmdet;
- u32 turn_pt;
- u32 type;
+ u16 age_voffset_in;
+ u16 dc_voffset_in;
+ u8 dvt_fixed;
+ u8 vmax;
+ u8 vmin;
+ u16 bts;
+ u16 mts;
+ u16 bdes;
+ u16 mdes;
+ u8 mtdes;
+ u8 dcbdet;
+ u8 dcmdet;
};
static u32 percent(u32 numerator, u32 denominator)
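The svs_bank kerneldoc above keeps the central relation opp_volt[i] = (volt[i] * volt_step) + volt_base, with volt_step and volt_base now carried in svs_bank_pdata. A standalone sketch of that conversion in both directions (illustrative step/base values, not tied to any particular bank):

#include <stdio.h>

/* Forward and inverse forms of the relation from the kerneldoc:
 *   opp_volt = bank_volt * volt_step + volt_base   (microvolts)
 */
static unsigned int ex_bank_to_opp(unsigned int bank_volt,
				   unsigned int step, unsigned int base)
{
	return bank_volt * step + base;
}

static unsigned int ex_opp_to_bank(unsigned int opp_uvolt,
				   unsigned int step, unsigned int base)
{
	return (opp_uvolt - base) / step;
}

int main(void)
{
	unsigned int step = 6250, base = 500000;	/* illustrative only */
	unsigned int bank_volt = 48;
	unsigned int opp_uvolt = ex_bank_to_opp(bank_volt, step, base);

	printf("bank 0x%02x -> %u uV -> bank 0x%02x\n",
	       bank_volt, opp_uvolt, ex_opp_to_bank(opp_uvolt, step, base));
	return 0;
}

With those example numbers, a bank code of 0x30 corresponds to 800000 uV, and converting back recovers the same code, which is the round trip svs_sync_bank_volts_from_opp() and svs_adjust_pm_opp_volts() rely on.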
@@ -492,10 +565,8 @@ static void svs_writel_relaxed(struct svs_platform *svsp, u32 val,
writel_relaxed(val, svsp->base + svsp->regs[rg_i]);
}
-static void svs_switch_bank(struct svs_platform *svsp)
+static void svs_switch_bank(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
-
svs_writel_relaxed(svsp, svsb->core_sel, CORESEL);
}
@@ -513,10 +584,11 @@ static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step,
static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
{
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
struct dev_pm_opp *opp;
u32 i, opp_u_volt;
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
svsb->opp_dfreq[i],
true);
@@ -528,8 +600,8 @@ static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
opp_u_volt = dev_pm_opp_get_voltage(opp);
svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt,
- svsb->volt_step,
- svsb->volt_base);
+ bdata->volt_step,
+ bdata->volt_base);
dev_pm_opp_put(opp);
}
@@ -539,6 +611,7 @@ static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
{
int ret = -EPERM, tzone_temp = 0;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop;
mutex_lock(&svsb->lock);
@@ -547,15 +620,15 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
* 2-line bank updates its corresponding opp volts.
* 1-line bank updates all opp volts.
*/
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
opp_start = 0;
opp_stop = svsb->turn_pt;
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
opp_start = svsb->turn_pt;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
} else {
opp_start = 0;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
}
/* Get thermal effect */
@@ -564,20 +637,20 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND &&
svsb->temp < SVSB_TEMP_LOWER_BOUND)) {
dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n",
- svsb->tzone_name, ret, svsb->temp);
+ bdata->tzone_name, ret, svsb->temp);
svsb->phase = SVSB_PHASE_ERROR;
}
- if (tzone_temp >= svsb->tzone_htemp)
- temp_voffset += svsb->tzone_htemp_voffset;
- else if (tzone_temp <= svsb->tzone_ltemp)
- temp_voffset += svsb->tzone_ltemp_voffset;
+ if (tzone_temp >= bdata->tzone_htemp)
+ temp_voffset += bdata->tzone_htemp_voffset;
+ else if (tzone_temp <= bdata->tzone_ltemp)
+ temp_voffset += bdata->tzone_ltemp_voffset;
/* 2-line bank update all opp volts when running mon mode */
- if (svsb->phase == SVSB_PHASE_MON && (svsb->type == SVSB_HIGH ||
- svsb->type == SVSB_LOW)) {
+ if (svsb->phase == SVSB_PHASE_MON && (bdata->type == SVSB_TYPE_HIGH ||
+ bdata->type == SVSB_TYPE_LOW)) {
opp_start = 0;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
}
}
@@ -594,8 +667,8 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
case SVSB_PHASE_MON:
svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin);
opp_volt = svs_bank_volt_to_opp_volt(svsb_volt,
- svsb->volt_step,
- svsb->volt_base);
+ bdata->volt_step,
+ bdata->volt_base);
break;
default:
dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase);
@@ -630,8 +703,7 @@ static void svs_bank_disable_and_restore_default_volts(struct svs_platform *svsp
return;
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_switch_bank(svsp);
+ svs_switch_bank(svsp, svsb);
svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
spin_unlock_irqrestore(&svs_lock, flags);
@@ -751,13 +823,14 @@ static int svs_status_debug_show(struct seq_file *m, void *v)
ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp);
if (ret)
- seq_printf(m, "%s: temperature ignore, turn_pt = %u\n",
- svsb->name, svsb->turn_pt);
+ seq_printf(m, "%s: temperature ignore, vbin_turn_pt = %u, turn_pt = %u\n",
+ svsb->name, svsb->vbin_turn_pt, svsb->turn_pt);
else
- seq_printf(m, "%s: temperature = %d, turn_pt = %u\n",
- svsb->name, tzone_temp, svsb->turn_pt);
+ seq_printf(m, "%s: temperature = %d, vbin_turn_pt = %u, turn_pt = %u\n",
+ svsb->name, tzone_temp, svsb->vbin_turn_pt,
+ svsb->turn_pt);
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < svsb->pdata.opp_count; i++) {
opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
svsb->opp_dfreq[i], true);
if (IS_ERR(opp)) {
@@ -862,12 +935,12 @@ static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
return DIV_ROUND_UP(vx, 100);
}
-static void svs_get_bank_volts_v3(struct svs_platform *svsp)
+static void svs_get_bank_volts_v3(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt;
u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0;
- u32 middle_index = (svsb->opp_count / 2);
+ u32 middle_index = (bdata->opp_count / 2);
if (svsb->phase == SVSB_PHASE_MON &&
svsb->volt_flags & SVSB_MON_VOLT_IGNORE)
@@ -878,7 +951,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
/* Target is to set svsb->volt[] by algorithm */
if (turn_pt < middle_index) {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/* volt[0] ~ volt[turn_pt - 1] */
for (i = 0; i < turn_pt; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
@@ -887,12 +960,12 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
shift_byte++;
}
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */
- j = svsb->opp_count - 7;
+ j = bdata->opp_count - 7;
svsb->volt[turn_pt] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
shift_byte++;
- for (i = j; i < svsb->opp_count; i++) {
+ for (i = j; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
vop = (shift_byte < REG_BYTES) ? &vop30 :
&vop74;
@@ -909,7 +982,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
svsb->freq_pct[i]);
}
} else {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/* volt[0] + volt[j] ~ volt[turn_pt - 1] */
j = turn_pt - 7;
svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
@@ -929,9 +1002,9 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
svsb->volt[0],
svsb->volt[j],
svsb->freq_pct[i]);
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/* volt[turn_pt] ~ volt[opp_count - 1] */
- for (i = turn_pt; i < svsb->opp_count; i++) {
+ for (i = turn_pt; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
vop = (shift_byte < REG_BYTES) ? &vop30 :
&vop74;
@@ -941,28 +1014,51 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
}
}
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
opp_start = 0;
opp_stop = svsb->turn_pt;
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
opp_start = svsb->turn_pt;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
}
for (i = opp_start; i < opp_stop; i++)
if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT)
svsb->volt[i] -= svsb->dvt_fixed;
+
+ /* For voltage bin support */
+ if (svsb->opp_dfreq[0] > svsb->freq_base) {
+ svsb->volt[0] = svs_opp_volt_to_bank_volt(svsb->opp_dvolt[0],
+ bdata->volt_step,
+ bdata->volt_base);
+
+ /* Find voltage bin turn point */
+ for (i = 0; i < bdata->opp_count; i++) {
+ if (svsb->opp_dfreq[i] <= svsb->freq_base) {
+ svsb->vbin_turn_pt = i;
+ break;
+ }
+ }
+
+ /* Override svs bank voltages */
+ for (i = 1; i < svsb->vbin_turn_pt; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[0],
+ svsb->freq_pct[svsb->vbin_turn_pt],
+ svsb->volt[0],
+ svsb->volt[svsb->vbin_turn_pt],
+ svsb->freq_pct[i]);
+ }
}
-static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
+static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0;
u32 b_sft, shift_byte = 0, turn_pt;
- u32 middle_index = (svsb->opp_count / 2);
+ u32 middle_index = (bdata->opp_count / 2);
- for (i = 0; i < svsb->opp_count; i++) {
- if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) {
+ for (i = 0; i < bdata->opp_count; i++) {
+ if (svsb->opp_dfreq[i] <= bdata->turn_freq_base) {
svsb->turn_pt = i;
break;
}
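The new "For voltage bin support" blocks above re-anchor volt[0] on the binned top OPP and then linearly interpolate volt[1..vbin_turn_pt-1] between volt[0] and volt[vbin_turn_pt] over freq_pct. A standalone sketch of that override with made-up numbers (the driver's interpolate() appears to do the same in scaled integer math with a round-up at the end):

#include <stdio.h>

/* Plain linear interpolation of voltage over frequency percent. */
static unsigned int ex_interp(unsigned int f0, unsigned int f1,
			      unsigned int v0, unsigned int v1, unsigned int fx)
{
	return v1 + (v0 - v1) * (fx - f1) / (f0 - f1);
}

int main(void)
{
	/* Made-up values: OPP 0 is the binned (faster-than-freq_base) point,
	 * OPP 2 is the voltage-bin turn point.
	 */
	unsigned int freq_pct[] = { 110, 105, 100 };
	unsigned int volt[]     = {  80,   0,  60 };
	unsigned int vbin_turn_pt = 2, i;

	for (i = 1; i < vbin_turn_pt; i++)
		volt[i] = ex_interp(freq_pct[0], freq_pct[vbin_turn_pt],
				    volt[0], volt[vbin_turn_pt], freq_pct[i]);

	printf("volt[1] = %u\n", volt[1]);	/* 70, midway between 60 and 80 */
	return 0;
}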
@@ -972,11 +1068,11 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
/* Target is to fill out freq_pct74 / freq_pct30 by algorithm */
if (turn_pt < middle_index) {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/*
* If we don't handle this situation,
- * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0"
- * and this leads SVSB_LOW to work abnormally.
+ * SVSB_TYPE_HIGH's FREQPCT74 / FREQPCT30 would keep "0"
+ * and this leads SVSB_TYPE_LOW to work abnormally.
*/
if (turn_pt == 0)
freq_pct30 = svsb->freq_pct[0];
@@ -989,15 +1085,15 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
*freq_pct |= (svsb->freq_pct[i] << b_sft);
shift_byte++;
}
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/*
* freq_pct[turn_pt] +
* freq_pct[opp_count - 7] ~ freq_pct[opp_count -1]
*/
freq_pct30 = svsb->freq_pct[turn_pt];
shift_byte++;
- j = svsb->opp_count - 7;
- for (i = j; i < svsb->opp_count; i++) {
+ j = bdata->opp_count - 7;
+ for (i = j; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
freq_pct = (shift_byte < REG_BYTES) ?
&freq_pct30 : &freq_pct74;
@@ -1006,7 +1102,7 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
}
}
} else {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/*
* freq_pct[0] +
* freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1]
@@ -1021,9 +1117,9 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
*freq_pct |= (svsb->freq_pct[i] << b_sft);
shift_byte++;
}
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */
- for (i = turn_pt; i < svsb->opp_count; i++) {
+ for (i = turn_pt; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
freq_pct = (shift_byte < REG_BYTES) ?
&freq_pct30 : &freq_pct74;
@@ -1037,9 +1133,9 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
svs_writel_relaxed(svsp, freq_pct30, FREQPCT30);
}
-static void svs_get_bank_volts_v2(struct svs_platform *svsp)
+static void svs_get_bank_volts_v2(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 temp, i;
temp = svs_readl_relaxed(svsp, VOP74);
@@ -1067,13 +1163,35 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp)
svsb->volt[14],
svsb->freq_pct[15]);
- for (i = 0; i < svsb->opp_count; i++)
+ for (i = 0; i < bdata->opp_count; i++)
svsb->volt[i] += svsb->volt_od;
+
+ /* For voltage bin support */
+ if (svsb->opp_dfreq[0] > svsb->freq_base) {
+ svsb->volt[0] = svs_opp_volt_to_bank_volt(svsb->opp_dvolt[0],
+ bdata->volt_step,
+ bdata->volt_base);
+
+ /* Find voltage bin turn point */
+ for (i = 0; i < bdata->opp_count; i++) {
+ if (svsb->opp_dfreq[i] <= svsb->freq_base) {
+ svsb->vbin_turn_pt = i;
+ break;
+ }
+ }
+
+ /* Override svs bank voltages */
+ for (i = 1; i < svsb->vbin_turn_pt; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[0],
+ svsb->freq_pct[svsb->vbin_turn_pt],
+ svsb->volt[0],
+ svsb->volt[svsb->vbin_turn_pt],
+ svsb->freq_pct[i]);
+ }
}
-static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
+static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
u32 freqpct74_val, freqpct30_val;
freqpct74_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[8]) |
@@ -1091,18 +1209,20 @@ static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
}
static void svs_set_bank_phase(struct svs_platform *svsp,
+ unsigned int bank_idx,
enum svsb_phase target_phase)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs;
- svs_switch_bank(svsp);
+ svs_switch_bank(svsp, svsb);
des_char = FIELD_PREP(SVSB_DESCHAR_FLD_BDES, svsb->bdes) |
FIELD_PREP(SVSB_DESCHAR_FLD_MDES, svsb->mdes);
svs_writel_relaxed(svsp, des_char, DESCHAR);
- temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, svsb->vco) |
+ temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, bdata->vco) |
FIELD_PREP(SVSB_TEMPCHAR_FLD_MTDES, svsb->mtdes) |
FIELD_PREP(SVSB_TEMPCHAR_FLD_DVT_FIXED, svsb->dvt_fixed);
svs_writel_relaxed(svsp, temp_char, TEMPCHAR);
@@ -1111,11 +1231,11 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
FIELD_PREP(SVSB_DETCHAR_FLD_DCMDET, svsb->dcmdet);
svs_writel_relaxed(svsp, det_char, DETCHAR);
- svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG);
- svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG);
+ svs_writel_relaxed(svsp, bdata->dc_config, DCCONFIG);
+ svs_writel_relaxed(svsp, bdata->age_config, AGECONFIG);
svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG);
- svsb->set_freq_pct(svsp);
+ bdata->set_freq_pct(svsp, svsb);
limit_vals = FIELD_PREP(SVSB_LIMITVALS_FLD_DTLO, SVSB_VAL_DTLO) |
FIELD_PREP(SVSB_LIMITVALS_FLD_DTHI, SVSB_VAL_DTHI) |
@@ -1125,13 +1245,13 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW);
svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG);
- svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT);
- svs_writel_relaxed(svsp, svsb->ctl0, CTL0);
+ svs_writel_relaxed(svsp, bdata->chk_shift, CHKSHIFT);
+ svs_writel_relaxed(svsp, bdata->ctl0, CTL0);
svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
switch (target_phase) {
case SVSB_PHASE_INIT01:
- svs_writel_relaxed(svsp, svsb->vboot, VBOOT);
+ svs_writel_relaxed(svsp, bdata->vboot, VBOOT);
svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
svs_writel_relaxed(svsp, SVSB_PTPEN_INIT01, SVSEN);
break;
@@ -1157,18 +1277,20 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
}
static inline void svs_save_bank_register_data(struct svs_platform *svsp,
+ unsigned short bank_idx,
enum svsb_phase phase)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
enum svs_reg_index rg_i;
for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++)
svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i);
}
-static inline void svs_error_isr_handler(struct svs_platform *svsp)
+static inline void svs_error_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n",
__func__, svs_readl_relaxed(svsp, CORESEL));
@@ -1180,27 +1302,29 @@ static inline void svs_error_isr_handler(struct svs_platform *svsp)
svs_readl_relaxed(svsp, SMSTATE1));
dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP));
- svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_ERROR);
svsb->phase = SVSB_PHASE_ERROR;
svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
}
-static inline void svs_init01_isr_handler(struct svs_platform *svsp)
+static inline void svs_init01_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ u32 val;
dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n",
__func__, svs_readl_relaxed(svsp, VDESIGN74),
svs_readl_relaxed(svsp, VDESIGN30),
svs_readl_relaxed(svsp, DCVALUES));
- svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_INIT01);
svsb->phase = SVSB_PHASE_INIT01;
- svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) &
- GENMASK(15, 0)) + 1;
+ val = ~(svs_readl_relaxed(svsp, DCVALUES) & GENMASK(15, 0)) + 1;
+ svsb->dc_voffset_in = val & GENMASK(15, 0);
if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE ||
(svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT &&
svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY))
@@ -1214,32 +1338,36 @@ static inline void svs_init01_isr_handler(struct svs_platform *svsp)
svsb->core_sel &= ~SVSB_DET_CLK_EN;
}
-static inline void svs_init02_isr_handler(struct svs_platform *svsp)
+static inline void svs_init02_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n",
__func__, svs_readl_relaxed(svsp, VOP74),
svs_readl_relaxed(svsp, VOP30),
svs_readl_relaxed(svsp, DCVALUES));
- svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_INIT02);
svsb->phase = SVSB_PHASE_INIT02;
- svsb->get_volts(svsp);
+ bdata->get_volts(svsp, svsb);
svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
}
-static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
+static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
- svs_save_bank_register_data(svsp, SVSB_PHASE_MON);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_MON);
svsb->phase = SVSB_PHASE_MON;
- svsb->get_volts(svsp);
+ bdata->get_volts(svsp, svsb);
svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0);
svs_writel_relaxed(svsp, SVSB_INTSTS_FLD_MONVOP, INTSTS);
@@ -1248,37 +1376,38 @@ static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
static irqreturn_t svs_isr(int irq, void *data)
{
struct svs_platform *svsp = data;
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb = NULL;
unsigned long flags;
u32 idx, int_sts, svs_en;
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name);
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
/* Find out which svs bank fires interrupt */
- if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) {
+ if (bdata->int_st & svs_readl_relaxed(svsp, INTST)) {
spin_unlock_irqrestore(&svs_lock, flags);
continue;
}
- svs_switch_bank(svsp);
+ svs_switch_bank(svsp, svsb);
int_sts = svs_readl_relaxed(svsp, INTSTS);
svs_en = svs_readl_relaxed(svsp, SVSEN);
if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
svs_en == SVSB_PTPEN_INIT01)
- svs_init01_isr_handler(svsp);
+ svs_init01_isr_handler(svsp, idx);
else if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
svs_en == SVSB_PTPEN_INIT02)
- svs_init02_isr_handler(svsp);
+ svs_init02_isr_handler(svsp, idx);
else if (int_sts & SVSB_INTSTS_FLD_MONVOP)
- svs_mon_mode_isr_handler(svsp);
+ svs_mon_mode_isr_handler(svsp, idx);
else
- svs_error_isr_handler(svsp);
+ svs_error_isr_handler(svsp, idx);
spin_unlock_irqrestore(&svs_lock, flags);
break;
@@ -1293,20 +1422,35 @@ static irqreturn_t svs_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static bool svs_mode_available(struct svs_platform *svsp, u8 mode)
+{
+ int i;
+
+ for (i = 0; i < svsp->bank_max; i++)
+ if (svsp->banks[i].mode_support & mode)
+ return true;
+ return false;
+}
+
static int svs_init01(struct svs_platform *svsp)
{
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
unsigned long flags, time_left;
bool search_done;
int ret = 0, r;
u32 opp_freq, opp_vboot, buck_volt, idx, i;
+ if (!svs_mode_available(svsp, SVSB_MODE_INIT01))
+ return 0;
+
/* Keep CPUs' core power on for svs_init01 initialization */
cpuidle_pause_and_lock();
/* Svs bank init01 preparation - power enable */
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
@@ -1314,7 +1458,7 @@ static int svs_init01(struct svs_platform *svsp)
ret = regulator_enable(svsb->buck);
if (ret) {
dev_err(svsb->dev, "%s enable fail: %d\n",
- svsb->buck_name, ret);
+ bdata->buck_name, ret);
goto svs_init01_resume_cpuidle;
}
@@ -1344,6 +1488,7 @@ static int svs_init01(struct svs_platform *svsp)
*/
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
@@ -1353,11 +1498,11 @@ static int svs_init01(struct svs_platform *svsp)
* fix to that freq until svs_init01 is done.
*/
search_done = false;
- opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
- svsb->volt_step,
- svsb->volt_base);
+ opp_vboot = svs_bank_volt_to_opp_volt(bdata->vboot,
+ bdata->volt_step,
+ bdata->volt_base);
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
opp_freq = svsb->opp_dfreq[i];
if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) {
ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
@@ -1389,13 +1534,14 @@ static int svs_init01(struct svs_platform *svsp)
/* Svs bank init01 begins */
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
- opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
- svsb->volt_step,
- svsb->volt_base);
+ opp_vboot = svs_bank_volt_to_opp_volt(bdata->vboot,
+ bdata->volt_step,
+ bdata->volt_base);
buck_volt = regulator_get_voltage(svsb->buck);
if (buck_volt != opp_vboot) {
@@ -1407,8 +1553,7 @@ static int svs_init01(struct svs_platform *svsp)
}
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_set_bank_phase(svsp, SVSB_PHASE_INIT01);
+ svs_set_bank_phase(svsp, idx, SVSB_PHASE_INIT01);
spin_unlock_irqrestore(&svs_lock, flags);
time_left = wait_for_completion_timeout(&svsb->init_completion,
@@ -1423,11 +1568,12 @@ static int svs_init01(struct svs_platform *svsp)
svs_init01_finish:
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
r = dev_pm_opp_enable(svsb->opp_dev,
svsb->opp_dfreq[i]);
if (r)
@@ -1453,7 +1599,7 @@ svs_init01_finish:
r = regulator_disable(svsb->buck);
if (r)
dev_err(svsb->dev, "%s disable fail: %d\n",
- svsb->buck_name, r);
+ bdata->buck_name, r);
}
svs_init01_resume_cpuidle:
@@ -1464,11 +1610,15 @@ svs_init01_resume_cpuidle:
static int svs_init02(struct svs_platform *svsp)
{
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
unsigned long flags, time_left;
int ret;
u32 idx;
+ if (!svs_mode_available(svsp, SVSB_MODE_INIT02))
+ return 0;
+
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
@@ -1477,8 +1627,7 @@ static int svs_init02(struct svs_platform *svsp)
reinit_completion(&svsb->init_completion);
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_set_bank_phase(svsp, SVSB_PHASE_INIT02);
+ svs_set_bank_phase(svsp, idx, SVSB_PHASE_INIT02);
spin_unlock_irqrestore(&svs_lock, flags);
time_left = wait_for_completion_timeout(&svsb->init_completion,
@@ -1497,11 +1646,12 @@ static int svs_init02(struct svs_platform *svsp)
*/
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT02))
continue;
- if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ if (bdata->type == SVSB_TYPE_HIGH || bdata->type == SVSB_TYPE_LOW) {
if (svs_sync_bank_volts_from_opp(svsb)) {
dev_err(svsb->dev, "sync volt fail\n");
ret = -EPERM;
@@ -1534,8 +1684,7 @@ static void svs_mon_mode(struct svs_platform *svsp)
continue;
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_set_bank_phase(svsp, SVSB_PHASE_MON);
+ svs_set_bank_phase(svsp, idx, SVSB_PHASE_MON);
spin_unlock_irqrestore(&svs_lock, flags);
}
}
@@ -1560,12 +1709,12 @@ static int svs_start(struct svs_platform *svsp)
static int svs_suspend(struct device *dev)
{
struct svs_platform *svsp = dev_get_drvdata(dev);
- struct svs_bank *svsb;
int ret;
u32 idx;
for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
+ struct svs_bank *svsb = &svsp->banks[idx];
+
svs_bank_disable_and_restore_default_volts(svsp, svsb);
}
@@ -1616,8 +1765,10 @@ out_of_resume:
static int svs_bank_resource_setup(struct svs_platform *svsp)
{
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
struct dev_pm_opp *opp;
+ char tz_name_buf[20];
unsigned long freq;
int count, ret;
u32 idx, i;
@@ -1626,35 +1777,23 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
- svsb->name = "SVSB_CPU_LITTLE";
- break;
- case SVSB_CPU_BIG:
- svsb->name = "SVSB_CPU_BIG";
- break;
- case SVSB_CCI:
- svsb->name = "SVSB_CCI";
- break;
- case SVSB_GPU:
- if (svsb->type == SVSB_HIGH)
- svsb->name = "SVSB_GPU_HIGH";
- else if (svsb->type == SVSB_LOW)
- svsb->name = "SVSB_GPU_LOW";
- else
- svsb->name = "SVSB_GPU";
- break;
- default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ if (bdata->sw_id >= SVSB_SWID_MAX || bdata->type >= SVSB_TYPE_MAX) {
+ dev_err(svsb->dev, "unknown bank sw_id or type\n");
return -EINVAL;
}
- svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev),
- GFP_KERNEL);
+ svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), GFP_KERNEL);
if (!svsb->dev)
return -ENOMEM;
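+	/* e.g. "SVSB_GPU" + "_HIGH", presumably matching the names the removed switch produced */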
+ svsb->name = devm_kasprintf(svsp->dev, GFP_KERNEL, "%s%s",
+ svs_swid_names[bdata->sw_id],
+ svs_type_names[bdata->type]);
+ if (!svsb->name)
+ return -ENOMEM;
+
ret = dev_set_name(svsb->dev, "%s", svsb->name);
if (ret)
return ret;
@@ -1672,32 +1811,34 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
if (svsb->mode_support & SVSB_MODE_INIT01) {
svsb->buck = devm_regulator_get_optional(svsb->opp_dev,
- svsb->buck_name);
+ bdata->buck_name);
if (IS_ERR(svsb->buck)) {
dev_err(svsb->dev, "cannot get \"%s-supply\"\n",
- svsb->buck_name);
+ bdata->buck_name);
return PTR_ERR(svsb->buck);
}
}
- if (!IS_ERR_OR_NULL(svsb->tzone_name)) {
- svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name);
+ if (!IS_ERR_OR_NULL(bdata->tzone_name)) {
+ snprintf(tz_name_buf, ARRAY_SIZE(tz_name_buf),
+ "%s-thermal", bdata->tzone_name);
+ svsb->tzd = thermal_zone_get_zone_by_name(tz_name_buf);
if (IS_ERR(svsb->tzd)) {
dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
- svsb->tzone_name);
+ tz_name_buf);
return PTR_ERR(svsb->tzd);
}
}
count = dev_pm_opp_get_opp_count(svsb->opp_dev);
- if (svsb->opp_count != count) {
+ if (bdata->opp_count != count) {
dev_err(svsb->dev,
"opp_count not \"%u\" but get \"%d\"?\n",
- svsb->opp_count, count);
+ bdata->opp_count, count);
return count;
}
- for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) {
+ for (i = 0, freq = ULONG_MAX; i < bdata->opp_count; i++, freq--) {
opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq);
if (IS_ERR(opp)) {
dev_err(svsb->dev, "cannot find freq = %ld\n",
@@ -1731,8 +1872,6 @@ static int svs_get_efuse_data(struct svs_platform *svsp,
*svsp_efuse = nvmem_cell_read(cell, svsp_efuse_max);
if (IS_ERR(*svsp_efuse)) {
- dev_err(svsp->dev, "cannot read \"%s\" efuse: %ld\n",
- nvmem_cell_name, PTR_ERR(*svsp_efuse));
nvmem_cell_put(cell);
return PTR_ERR(*svsp_efuse);
}
@@ -1743,79 +1882,91 @@ static int svs_get_efuse_data(struct svs_platform *svsp,
return 0;
}
-static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
+static u32 svs_get_fuse_val(u32 *fuse_array, const struct svs_fusemap *fmap, u8 nbits)
{
- struct svs_bank *svsb;
- u32 idx, i, vmin, golden_temp;
- int ret;
+ u32 val;
- for (i = 0; i < svsp->efuse_max; i++)
+ if (fmap->index < 0)
+ return FUSE_DATA_NOT_VALID;
+
+ val = fuse_array[fmap->index] >> fmap->ofst;
+ val &= GENMASK(nbits - 1, 0);
+
+ return val;
+}
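A minimal sketch of how a fusemap entry feeds this helper (editor's sketch, not part of the patch), using the { 19, 4 } VMIN entry from the MT8192/MT8195 global maps further down; it reproduces the legacy "(efuse[19] >> 4) & GENMASK(1, 0)" parse that the old MT8192 parser open-coded:

	/* VMIN lives in fuse word 19, bits [5:4] */
	static const struct svs_fusemap vmin_fmap = { .index = 19, .ofst = 4 };
	u8 vmin = svs_get_fuse_val(svsp->efuse, &vmin_fmap, 2);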
+
+static bool svs_is_available(struct svs_platform *svsp)
+{
+ int i, num_populated = 0;
+
+ /* If at least two fuse arrays are populated, SVS is calibrated */
+ for (i = 0; i < svsp->efuse_max; i++) {
if (svsp->efuse[i])
- dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
- i, svsp->efuse[i]);
+ num_populated++;
- if (!svsp->efuse[9]) {
- dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n");
- return false;
+ if (num_populated > 1)
+ return true;
}
- /* Svs efuse parsing */
- vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0);
+ return false;
+}
- for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
+static bool svs_common_parse_efuse(struct svs_platform *svsp,
+ const struct svs_platform_data *pdata)
+{
+ const struct svs_fusemap *gfmap = pdata->glb_fuse_map;
+ struct svs_fusemap tfm = { 0, 24 };
+ u32 golden_temp, val;
+ u8 ft_pgm, vmin;
+ int i;
- if (vmin == 0x1)
- svsb->vmin = 0x1e;
+ if (!svs_is_available(svsp))
+ return false;
- if (svsb->type == SVSB_LOW) {
- svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0);
- svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0);
- } else if (svsb->type == SVSB_HIGH) {
- svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0);
- svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0);
- }
+ /* Get golden temperature from SVS-Thermal calibration */
+ val = svs_get_fuse_val(svsp->tefuse, &tfm, 8);
- svsb->vmax += svsb->dvt_fixed;
- }
+ /* If golden temp is not programmed, use the default of 50 */
+ golden_temp = val ? val : 50;
- ret = svs_get_efuse_data(svsp, "t-calibration-data",
- &svsp->tefuse, &svsp->tefuse_max);
- if (ret)
- return false;
+ /* Parse fused SVS calibration */
+ ft_pgm = svs_get_fuse_val(svsp->efuse, &gfmap[GLB_FT_PGM], 8);
+ vmin = svs_get_fuse_val(svsp->efuse, &gfmap[GLB_VMIN], 2);
- for (i = 0; i < svsp->tefuse_max; i++)
- if (svsp->tefuse[i] != 0)
- break;
+ for (i = 0; i < svsp->bank_max; i++) {
+ struct svs_bank *svsb = &svsp->banks[i];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
+ const struct svs_fusemap *dfmap = bdata->dev_fuse_map;
- if (i == svsp->tefuse_max)
- golden_temp = 50; /* All thermal efuse data are 0 */
- else
- golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0);
+ if (vmin == 1)
+ svsb->vmin = 0x1e;
- for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
- svsb->mts = 500;
- svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4;
+ if (ft_pgm == 0)
+ svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE;
+
+ svsb->mtdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MTDES], 8);
+ svsb->bdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_BDES], 8);
+ svsb->mdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MDES], 8);
+ svsb->dcbdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCBDET], 8);
+ svsb->dcmdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCMDET], 8);
+ svsb->vmax += svsb->dvt_fixed;
+
+ svsb->mts = (svsp->ts_coeff * 2) / 1000;
+ svsb->bts = (((500 * golden_temp + svsp->ts_coeff) / 1000) - 25) * 4;
}
return true;
}
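A quick sanity check of the generalised temperature formula (editor's sketch; it assumes SVSB_TS_COEFF_MT8195 keeps the 250460 constant that the removed MT8192 parser hard-coded, as the define itself is outside this hunk):

	/*
	 * With ts_coeff = 250460 and an unprogrammed golden_temp of 50:
	 *   mts = (250460 * 2) / 1000 = 500                (the old fixed value)
	 *   bts = (((500 * 50 + 250460) / 1000) - 25) * 4
	 *       = ((275460 / 1000) - 25) * 4 = 1000        (matches the old formula)
	 */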
-static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
+static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp,
+ const struct svs_platform_data *pdata)
{
struct svs_bank *svsb;
+ const struct svs_bank_pdata *bdata;
int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0;
int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t;
int o_slope, o_slope_sign, ts_id;
u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
- int ret;
for (i = 0; i < svsp->efuse_max; i++)
if (svsp->efuse[i])
@@ -1828,74 +1979,48 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
}
/* Svs efuse parsing */
- ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0);
+ ft_pgm = svs_get_fuse_val(svsp->efuse, &pdata->glb_fuse_map[GLB_FT_PGM], 4);
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
+ const struct svs_fusemap *dfmap = bdata->dev_fuse_map;
if (ft_pgm <= 1)
svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE;
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
- svsb->bdes = svsp->efuse[16] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0);
- svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+ svsb->mtdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MTDES], 8);
+ svsb->bdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_BDES], 8);
+ svsb->mdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MDES], 8);
+ svsb->dcbdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCBDET], 8);
+ svsb->dcmdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCMDET], 8);
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
+ case SVSB_SWID_CCI:
if (ft_pgm <= 3)
svsb->volt_od += 10;
else
svsb->volt_od += 2;
break;
- case SVSB_CPU_BIG:
- svsb->bdes = svsp->efuse[18] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0);
- svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0);
-
+ case SVSB_SWID_CPU_BIG:
if (ft_pgm <= 3)
svsb->volt_od += 15;
else
svsb->volt_od += 12;
break;
- case SVSB_CCI:
- svsb->bdes = svsp->efuse[4] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0);
- svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0);
-
- if (ft_pgm <= 3)
- svsb->volt_od += 10;
- else
- svsb->volt_od += 2;
- break;
- case SVSB_GPU:
- svsb->bdes = svsp->efuse[6] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0);
- svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0);
-
- if (ft_pgm >= 2) {
+ case SVSB_SWID_GPU:
+ if (ft_pgm != FUSE_DATA_NOT_VALID && ft_pgm >= 2) {
svsb->freq_base = 800000000; /* 800MHz */
svsb->dvt_fixed = 2;
}
break;
default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
return false;
}
}
- ret = svs_get_efuse_data(svsp, "t-calibration-data",
- &svsp->tefuse, &svsp->tefuse_max);
- if (ret)
- return false;
-
/* Thermal efuse parsing */
adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0);
adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0);
@@ -1955,23 +2080,24 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
svsb->mts = mts;
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
tb_roomt = x_roomt[3];
break;
- case SVSB_CPU_BIG:
+ case SVSB_SWID_CPU_BIG:
tb_roomt = x_roomt[4];
break;
- case SVSB_CCI:
+ case SVSB_SWID_CCI:
tb_roomt = x_roomt[3];
break;
- case SVSB_GPU:
+ case SVSB_SWID_GPU:
tb_roomt = x_roomt[1];
break;
default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
goto remove_mt8183_svsb_mon_mode;
}
@@ -2007,14 +2133,12 @@ static struct device *svs_get_subsys_device(struct svs_platform *svsp,
}
pdev = of_find_device_by_node(np);
+ of_node_put(np);
if (!pdev) {
- of_node_put(np);
dev_err(svsp->dev, "cannot find pdev by %s\n", node_name);
return ERR_PTR(-ENXIO);
}
- of_node_put(np);
-
return &pdev->dev;
}
@@ -2041,34 +2165,63 @@ static struct device *svs_add_device_link(struct svs_platform *svsp,
return dev;
}
+static void svs_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int svs_mt8192_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
- struct svs_bank *svsb;
u32 idx;
+ int ret;
svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
if (IS_ERR(svsp->rst))
return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst),
"cannot get svs reset control\n");
- dev = svs_add_device_link(svsp, "lvts");
+ dev = svs_add_device_link(svsp, "thermal-sensor");
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get lvts device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
-
- if (svsb->type == SVSB_HIGH)
- svsb->opp_dev = svs_add_device_link(svsp, "gpu");
- else if (svsb->type == SVSB_LOW)
- svsb->opp_dev = svs_get_subsys_device(svsp, "gpu");
+ struct svs_bank *svsb = &svsp->banks[idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
+
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
+ case SVSB_SWID_CPU_BIG:
+ svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
+ break;
+ case SVSB_SWID_CCI:
+ svsb->opp_dev = svs_add_device_link(svsp, "cci");
+ break;
+ case SVSB_SWID_GPU:
+ if (bdata->type == SVSB_TYPE_LOW)
+ svsb->opp_dev = svs_get_subsys_device(svsp, "gpu");
+ else
+ svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
+ return -EINVAL;
+ }
if (IS_ERR(svsb->opp_dev))
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
@@ -2077,30 +2230,33 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
static int svs_mt8183_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
- struct svs_bank *svsb;
u32 idx;
+ int ret;
- dev = svs_add_device_link(svsp, "thermal");
+ dev = svs_add_device_link(svsp, "thermal-sensor");
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get thermal device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
-
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
- case SVSB_CPU_BIG:
- svsb->opp_dev = get_cpu_device(svsb->cpu_id);
+ struct svs_bank *svsb = &svsp->banks[idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
+
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
+ case SVSB_SWID_CPU_BIG:
+ svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
break;
- case SVSB_CCI:
+ case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
break;
- case SVSB_GPU:
+ case SVSB_SWID_GPU:
svsb->opp_dev = svs_add_device_link(svsp, "gpu");
break;
default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
return -EINVAL;
}
@@ -2108,182 +2264,554 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
}
+static struct svs_bank svs_mt8195_banks[] = {
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 10, 16 }, { 10, 24 }, { 10, 0 }, { 8, 0 }, { 8, 8 }
+ }
+ },
+ .mode_support = SVSB_MODE_INIT02,
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .freq_base = 640000000,
+ .core_sel = 0x0fff0100,
+ .dvt_fixed = 0x1,
+ .vmax = 0x38,
+ .vmin = 0x14,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 9, 16 }, { 9, 24 }, { 9, 0 }, { 8, 0 }, { 8, 8 }
+ },
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 880000000,
+ .core_sel = 0x0fff0101,
+ .dvt_fixed = 0x6,
+ .vmax = 0x38,
+ .vmin = 0x14,
+ },
+};
+
static struct svs_bank svs_mt8192_banks[] = {
{
- .sw_id = SVSB_GPU,
- .type = SVSB_LOW,
- .set_freq_pct = svs_set_bank_freq_pct_v3,
- .get_volts = svs_get_bank_volts_v3,
- .tzone_name = "gpu1",
- .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
- .mode_support = SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 688000000,
- .turn_freq_base = 688000000,
- .volt_step = 6250,
- .volt_base = 400000,
- .vmax = 0x60,
- .vmin = 0x1a,
- .age_config = 0x555555,
- .dc_config = 0x1,
- .dvt_fixed = 0x1,
- .vco = 0x18,
- .chk_shift = 0x87,
- .core_sel = 0x0fff0100,
- .int_st = BIT(0),
- .ctl0 = 0x00540003,
- .tzone_htemp = 85000,
- .tzone_htemp_voffset = 0,
- .tzone_ltemp = 25000,
- .tzone_ltemp_voffset = 7,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 10, 16 }, { 10, 24 }, { 10, 0 }, { 17, 0 }, { 17, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .mode_support = SVSB_MODE_INIT02,
+ .freq_base = 688000000,
+ .core_sel = 0x0fff0100,
+ .dvt_fixed = 0x1,
+ .vmax = 0x60,
+ .vmin = 0x1a,
},
{
- .sw_id = SVSB_GPU,
- .type = SVSB_HIGH,
- .set_freq_pct = svs_set_bank_freq_pct_v3,
- .get_volts = svs_get_bank_volts_v3,
- .tzone_name = "gpu1",
- .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT |
- SVSB_MON_VOLT_IGNORE,
- .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 902000000,
- .turn_freq_base = 688000000,
- .volt_step = 6250,
- .volt_base = 400000,
- .vmax = 0x60,
- .vmin = 0x1a,
- .age_config = 0x555555,
- .dc_config = 0x1,
- .dvt_fixed = 0x6,
- .vco = 0x18,
- .chk_shift = 0x87,
- .core_sel = 0x0fff0101,
- .int_st = BIT(1),
- .ctl0 = 0x00540003,
- .tzone_htemp = 85000,
- .tzone_htemp_voffset = 0,
- .tzone_ltemp = 25000,
- .tzone_ltemp_voffset = 7,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 9, 16 }, { 9, 24 }, { 17, 0 }, { 17, 16 }, { 17, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 902000000,
+ .core_sel = 0x0fff0101,
+ .dvt_fixed = 0x6,
+ .vmax = 0x60,
+ .vmin = 0x1a,
+ },
+};
+
+static struct svs_bank svs_mt8188_banks[] = {
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00100003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 5, 16 }, { 5, 24 }, { 5, 0 }, { 15, 16 }, { 15, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .mode_support = SVSB_MODE_INIT02,
+ .freq_base = 640000000,
+ .core_sel = 0x0fff0000,
+ .dvt_fixed = 0x1,
+ .vmax = 0x38,
+ .vmin = 0x1c,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00100003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 4, 16 }, { 4, 24 }, { 4, 0 }, { 14, 0 }, { 14, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 880000000,
+ .core_sel = 0x0fff0001,
+ .dvt_fixed = 0x4,
+ .vmax = 0x38,
+ .vmin = 0x1c,
+ },
+};
+
+static struct svs_bank svs_mt8186_banks[] = {
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_BIG,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .cpu_id = 6,
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 1670000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 3, 16 }, { 3, 24 }, { 3, 0 }, { 14, 16 }, { 14, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .volt_od = 4,
+ .mode_support = SVSB_MODE_INIT02,
+ .freq_base = 1670000000,
+ .core_sel = 0x0fff0100,
+ .dvt_fixed = 0x3,
+ .vmax = 0x59,
+ .vmin = 0x20,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_BIG,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .cpu_id = 6,
+ .tzone_name = "cpu-big",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 1670000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 8,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 2, 16 }, { 2, 24 }, { 2, 0 }, { 13, 0 }, { 13, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .volt_od = 4,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 2050000000,
+ .core_sel = 0x0fff0101,
+ .dvt_fixed = 0x6,
+ .vmax = 0x73,
+ .vmin = 0x20,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_LITTLE,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 0,
+ .tzone_name = "cpu-little",
+ .opp_count = MAX_OPP_ENTRIES,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(2),
+ .ctl0 = 0x3210000f,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 8,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 4, 16 }, { 4, 24 }, { 4, 0 }, { 14, 0 }, { 14, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .volt_od = 3,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 2000000000,
+ .core_sel = 0x0fff0102,
+ .dvt_fixed = 0x6,
+ .vmax = 0x65,
+ .vmin = 0x20,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CCI,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .tzone_name = "cci",
+ .opp_count = MAX_OPP_ENTRIES,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(3),
+ .ctl0 = 0x3210000f,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 8,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 5, 16 }, { 5, 24 }, { 5, 0 }, { 15, 16 }, { 15, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .volt_od = 3,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 1400000000,
+ .core_sel = 0x0fff0103,
+ .dvt_fixed = 0x6,
+ .vmax = 0x65,
+ .vmin = 0x20,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(4),
+ .ctl0 = 0x00100003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 6, 16 }, { 6, 24 }, { 6, 0 }, { 15, 8 }, { 15, 0 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 850000000,
+ .core_sel = 0x0fff0104,
+ .dvt_fixed = 0x4,
+ .vmax = 0x58,
+ .vmin = 0x20,
},
};
static struct svs_bank svs_mt8183_banks[] = {
{
- .sw_id = SVSB_CPU_LITTLE,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .cpu_id = 0,
- .buck_name = "proc",
- .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 1989000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x64,
- .vmin = 0x18,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x7,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0000,
- .int_st = BIT(0),
- .ctl0 = 0x00010001,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_LITTLE,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 0,
+ .buck_name = "proc",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(0),
+ .ctl0 = 0x00010001,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 16, 0 }, { 16, 8 }, { 17, 16 }, { 16, 16 }, { 16, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .freq_base = 1989000000,
+ .core_sel = 0x8fff0000,
+ .dvt_fixed = 0x7,
+ .vmax = 0x64,
+ .vmin = 0x18,
+
},
{
- .sw_id = SVSB_CPU_BIG,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .cpu_id = 4,
- .buck_name = "proc",
- .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 1989000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x58,
- .vmin = 0x10,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x7,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0001,
- .int_st = BIT(1),
- .ctl0 = 0x00000001,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_BIG,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 4,
+ .buck_name = "proc",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(1),
+ .ctl0 = 0x00000001,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 18, 0 }, { 18, 8 }, { 17, 0 }, { 18, 16 }, { 18, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .freq_base = 1989000000,
+ .core_sel = 0x8fff0001,
+ .dvt_fixed = 0x7,
+ .vmax = 0x58,
+ .vmin = 0x10,
+
},
{
- .sw_id = SVSB_CCI,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .buck_name = "proc",
- .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 1196000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x64,
- .vmin = 0x18,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x7,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0002,
- .int_st = BIT(2),
- .ctl0 = 0x00100003,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CCI,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "proc",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(2),
+ .ctl0 = 0x00100003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 4, 0 }, { 4, 8 }, { 5, 16 }, { 4, 16 }, { 4, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .freq_base = 1196000000,
+ .core_sel = 0x8fff0002,
+ .dvt_fixed = 0x7,
+ .vmax = 0x64,
+ .vmin = 0x18,
},
{
- .sw_id = SVSB_GPU,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .buck_name = "mali",
- .tzone_name = "tzts2",
- .volt_flags = SVSB_INIT01_PD_REQ |
- SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 |
- SVSB_MODE_MON,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 900000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x40,
- .vmin = 0x14,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x3,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0003,
- .int_st = BIT(3),
- .ctl0 = 0x00050001,
- .tzone_htemp = 85000,
- .tzone_htemp_voffset = 0,
- .tzone_ltemp = 25000,
- .tzone_ltemp_voffset = 3,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "mali",
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(3),
+ .ctl0 = 0x00050001,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 3,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 6, 0 }, { 6, 8 }, { 5, 0 }, { 6, 16 }, { 6, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_PD_REQ | SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 900000000,
+ .core_sel = 0x8fff0003,
+ .dvt_fixed = 0x3,
+ .vmax = 0x40,
+ .vmin = 0x14,
},
};
+static const struct svs_platform_data svs_mt8195_platform_data = {
+ .name = "mt8195-svs",
+ .banks = svs_mt8195_banks,
+ .efuse_parsing = svs_common_parse_efuse,
+ .probe = svs_mt8192_platform_probe,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8195_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8195,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ { 0, 0 }, { 19, 4 }
+ }
+};
+
static const struct svs_platform_data svs_mt8192_platform_data = {
.name = "mt8192-svs",
.banks = svs_mt8192_banks,
- .efuse_parsing = svs_mt8192_efuse_parsing,
+ .efuse_parsing = svs_common_parse_efuse,
.probe = svs_mt8192_platform_probe,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8192_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8195,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* FT_PGM not present */
+ { -1, 0 }, { 19, 4 }
+ }
+};
+
+static const struct svs_platform_data svs_mt8188_platform_data = {
+ .name = "mt8188-svs",
+ .banks = svs_mt8188_banks,
+ .efuse_parsing = svs_common_parse_efuse,
+ .probe = svs_mt8192_platform_probe,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8188_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8195,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* FT_PGM and VMIN not present */
+ { -1, 0 }, { -1, 0 }
+ }
+};
+
+static const struct svs_platform_data svs_mt8186_platform_data = {
+ .name = "mt8186-svs",
+ .banks = svs_mt8186_banks,
+ .efuse_parsing = svs_common_parse_efuse,
+ .probe = svs_mt8192_platform_probe,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8186_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8186,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* FT_PGM and VMIN not present */
+ { -1, 0 }, { -1, 0 }
+ }
};
static const struct svs_platform_data svs_mt8183_platform_data = {
@@ -2293,18 +2821,19 @@ static const struct svs_platform_data svs_mt8183_platform_data = {
.probe = svs_mt8183_platform_probe,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8183_banks),
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* VMIN not present */
+ { 0, 4 }, { -1, 0 }
+ }
};
static const struct of_device_id svs_of_match[] = {
- {
- .compatible = "mediatek,mt8192-svs",
- .data = &svs_mt8192_platform_data,
- }, {
- .compatible = "mediatek,mt8183-svs",
- .data = &svs_mt8183_platform_data,
- }, {
- /* Sentinel */
- },
+ { .compatible = "mediatek,mt8195-svs", .data = &svs_mt8195_platform_data },
+ { .compatible = "mediatek,mt8192-svs", .data = &svs_mt8192_platform_data },
+ { .compatible = "mediatek,mt8188-svs", .data = &svs_mt8188_platform_data },
+ { .compatible = "mediatek,mt8186-svs", .data = &svs_mt8186_platform_data },
+ { .compatible = "mediatek,mt8183-svs", .data = &svs_mt8183_platform_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, svs_of_match);
@@ -2324,6 +2853,7 @@ static int svs_probe(struct platform_device *pdev)
svsp->banks = svsp_data->banks;
svsp->regs = svsp_data->regs;
svsp->bank_max = svsp_data->bank_max;
+ svsp->ts_coeff = svsp_data->ts_coeff;
ret = svsp_data->probe(svsp);
if (ret)
@@ -2331,20 +2861,24 @@ static int svs_probe(struct platform_device *pdev)
ret = svs_get_efuse_data(svsp, "svs-calibration-data",
&svsp->efuse, &svsp->efuse_max);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot read SVS calibration\n");
+
+ ret = svs_get_efuse_data(svsp, "t-calibration-data",
+ &svsp->tefuse, &svsp->tefuse_max);
if (ret) {
- ret = -EPERM;
+ dev_err_probe(&pdev->dev, ret, "Cannot read SVS-Thermal calibration\n");
goto svs_probe_free_efuse;
}
- if (!svsp_data->efuse_parsing(svsp)) {
- dev_err(svsp->dev, "efuse data parsing failed\n");
- ret = -EPERM;
+ if (!svsp_data->efuse_parsing(svsp, svsp_data)) {
+ ret = dev_err_probe(svsp->dev, -EINVAL, "efuse data parsing failed\n");
goto svs_probe_free_tefuse;
}
ret = svs_bank_resource_setup(svsp);
if (ret) {
- dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "svs bank resource setup fail\n");
goto svs_probe_free_tefuse;
}
@@ -2356,43 +2890,40 @@ static int svs_probe(struct platform_device *pdev)
svsp->main_clk = devm_clk_get(svsp->dev, "main");
if (IS_ERR(svsp->main_clk)) {
- dev_err(svsp->dev, "failed to get clock: %ld\n",
- PTR_ERR(svsp->main_clk));
- ret = PTR_ERR(svsp->main_clk);
+ ret = dev_err_probe(svsp->dev, PTR_ERR(svsp->main_clk),
+ "failed to get clock\n");
goto svs_probe_free_tefuse;
}
ret = clk_prepare_enable(svsp->main_clk);
if (ret) {
- dev_err(svsp->dev, "cannot enable main clk: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "cannot enable main clk\n");
goto svs_probe_free_tefuse;
}
svsp->base = of_iomap(svsp->dev->of_node, 0);
if (IS_ERR_OR_NULL(svsp->base)) {
- dev_err(svsp->dev, "cannot find svs register base\n");
- ret = -EINVAL;
+ ret = dev_err_probe(svsp->dev, -EINVAL, "cannot find svs register base\n");
goto svs_probe_clk_disable;
}
ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
IRQF_ONESHOT, svsp_data->name, svsp);
if (ret) {
- dev_err(svsp->dev, "register irq(%d) failed: %d\n",
- svsp_irq, ret);
+ dev_err_probe(svsp->dev, ret, "register irq(%d) failed\n", svsp_irq);
goto svs_probe_iounmap;
}
ret = svs_start(svsp);
if (ret) {
- dev_err(svsp->dev, "svs start fail: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "svs start fail\n");
goto svs_probe_iounmap;
}
#ifdef CONFIG_DEBUG_FS
ret = svs_create_debug_cmds(svsp);
if (ret) {
- dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "svs create debug cmds fail\n");
goto svs_probe_iounmap;
}
#endif
@@ -2401,18 +2932,12 @@ static int svs_probe(struct platform_device *pdev)
svs_probe_iounmap:
iounmap(svsp->base);
-
svs_probe_clk_disable:
clk_disable_unprepare(svsp->main_clk);
-
svs_probe_free_tefuse:
- if (!IS_ERR_OR_NULL(svsp->tefuse))
- kfree(svsp->tefuse);
-
+ kfree(svsp->tefuse);
svs_probe_free_efuse:
- if (!IS_ERR_OR_NULL(svsp->efuse))
- kfree(svsp->efuse);
-
+ kfree(svsp->efuse);
return ret;
}
@@ -2430,5 +2955,6 @@ static struct platform_driver svs_driver = {
module_platform_driver(svs_driver);
MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
MODULE_DESCRIPTION("MediaTek SVS driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index eb656b33156b..bcf554602561 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,6 +1,7 @@
config POLARFIRE_SOC_SYS_CTRL
- tristate "POLARFIRE_SOC_SYS_CTRL"
+ tristate "Microchip PolarFire SoC (MPFS) system controller support"
depends on POLARFIRE_SOC_MAILBOX
+ depends on MTD
help
This driver adds support for the PolarFire SoC (MPFS) system controller.
@@ -8,3 +9,15 @@ config POLARFIRE_SOC_SYS_CTRL
module will be called mpfs_system_controller.
If unsure, say N.
+
+config POLARFIRE_SOC_SYSCONS
+ bool "PolarFire SoC (MPFS) syscon drivers"
+ default y
+ depends on ARCH_MICROCHIP
+ select MFD_CORE
+ help
+ These drivers add support for the syscons on PolarFire SoC (MPFS).
+ Without these drivers, core parts of the kernel such as clocks
+ and resets will not function correctly.
+
+ If unsure, and on a PolarFire SoC, say y.
diff --git a/drivers/soc/microchip/Makefile b/drivers/soc/microchip/Makefile
index 14489919fe4b..1a3a1594b089 100644
--- a/drivers/soc/microchip/Makefile
+++ b/drivers/soc/microchip/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_POLARFIRE_SOC_SYS_CTRL) += mpfs-sys-controller.o
+obj-$(CONFIG_POLARFIRE_SOC_SYSCONS) += mpfs-control-scb.o mpfs-mss-top-sysreg.o
diff --git a/drivers/soc/microchip/mpfs-control-scb.c b/drivers/soc/microchip/mpfs-control-scb.c
new file mode 100644
index 000000000000..f0b84b1f49cb
--- /dev/null
+++ b/drivers/soc/microchip/mpfs-control-scb.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+
+static const struct mfd_cell mpfs_control_scb_devs[] = {
+ MFD_CELL_NAME("mpfs-tvs"),
+};
+
+static int mpfs_control_scb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return mfd_add_devices(dev, PLATFORM_DEVID_NONE, mpfs_control_scb_devs,
+ ARRAY_SIZE(mpfs_control_scb_devs), NULL, 0, NULL);
+}
+
+static const struct of_device_id mpfs_control_scb_of_match[] = {
+ { .compatible = "microchip,mpfs-control-scb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_control_scb_of_match);
+
+static struct platform_driver mpfs_control_scb_driver = {
+ .driver = {
+ .name = "mpfs-control-scb",
+ .of_match_table = mpfs_control_scb_of_match,
+ },
+ .probe = mpfs_control_scb_probe,
+};
+module_platform_driver(mpfs_control_scb_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC control scb driver");
diff --git a/drivers/soc/microchip/mpfs-mss-top-sysreg.c b/drivers/soc/microchip/mpfs-mss-top-sysreg.c
new file mode 100644
index 000000000000..b2244e44ff0f
--- /dev/null
+++ b/drivers/soc/microchip/mpfs-mss-top-sysreg.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+static const struct mfd_cell mpfs_mss_top_sysreg_devs[] = {
+ MFD_CELL_NAME("mpfs-reset"),
+};
+
+static int mpfs_mss_top_sysreg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = mfd_add_devices(dev, PLATFORM_DEVID_NONE, mpfs_mss_top_sysreg_devs,
+ ARRAY_SIZE(mpfs_mss_top_sysreg_devs), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id mpfs_mss_top_sysreg_of_match[] = {
+ { .compatible = "microchip,mpfs-mss-top-sysreg", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_mss_top_sysreg_of_match);
+
+static struct platform_driver mpfs_mss_top_sysreg_driver = {
+ .driver = {
+ .name = "mpfs-mss-top-sysreg",
+ .of_match_table = mpfs_mss_top_sysreg_of_match,
+ },
+ .probe = mpfs_mss_top_sysreg_probe,
+};
+module_platform_driver(mpfs_mss_top_sysreg_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC mss top sysreg driver");
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c
index fbcd5fd24d7c..30bc45d17d34 100644
--- a/drivers/soc/microchip/mpfs-sys-controller.c
+++ b/drivers/soc/microchip/mpfs-sys-controller.c
@@ -12,6 +12,8 @@
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/jiffies.h>
+#include <linux/mtd/mtd.h>
+#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/mailbox_client.h>
@@ -30,6 +32,7 @@ struct mpfs_sys_controller {
struct mbox_client client;
struct mbox_chan *chan;
struct completion c;
+ struct mtd_info *flash;
struct kref consumers;
};
@@ -63,7 +66,9 @@ int mpfs_blocking_transaction(struct mpfs_sys_controller *sys_controller, struct
*/
if (!wait_for_completion_timeout(&sys_controller->c, timeout)) {
ret = -EBADMSG;
- dev_warn(sys_controller->client.dev, "MPFS sys controller service failed\n");
+ dev_warn(sys_controller->client.dev,
+ "MPFS sys controller service failed with status: %d\n",
+ msg->response->resp_status);
} else {
ret = 0;
}
@@ -99,6 +104,12 @@ static void mpfs_sys_controller_put(void *data)
kref_put(&sys_controller->consumers, mpfs_sys_controller_delete);
}
+struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_client)
+{
+ return mpfs_client->flash;
+}
+EXPORT_SYMBOL(mpfs_sys_controller_get_flash);
+
static struct platform_device subdevs[] = {
{
.name = "mpfs-rng",
@@ -107,19 +118,34 @@ static struct platform_device subdevs[] = {
{
.name = "mpfs-generic-service",
.id = -1,
- }
+ },
+ {
+ .name = "mpfs-auto-update",
+ .id = -1,
+ },
};
static int mpfs_sys_controller_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mpfs_sys_controller *sys_controller;
+ struct device_node *np;
int i, ret;
sys_controller = kzalloc(sizeof(*sys_controller), GFP_KERNEL);
if (!sys_controller)
return -ENOMEM;
+ np = of_parse_phandle(dev->of_node, "microchip,bitstream-flash", 0);
+ if (!np)
+ goto no_flash;
+
+ sys_controller->flash = of_get_mtd_device_by_node(np);
+ of_node_put(np);
+ if (IS_ERR(sys_controller->flash))
+ return dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n");
+
+no_flash:
sys_controller->client.dev = dev;
sys_controller->client.rx_callback = mpfs_sys_controller_rx_callback;
sys_controller->client.tx_block = 1U;
@@ -138,7 +164,6 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sys_controller);
- dev_info(&pdev->dev, "Registered MPFS system controller\n");
for (i = 0; i < ARRAY_SIZE(subdevs); i++) {
subdevs[i].dev.parent = dev;
@@ -146,16 +171,16 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
dev_warn(dev, "Error registering sub device %s\n", subdevs[i].name);
}
+ dev_info(&pdev->dev, "Registered MPFS system controller\n");
+
return 0;
}
-static int mpfs_sys_controller_remove(struct platform_device *pdev)
+static void mpfs_sys_controller_remove(struct platform_device *pdev)
{
struct mpfs_sys_controller *sys_controller = platform_get_drvdata(pdev);
mpfs_sys_controller_put(sys_controller);
-
- return 0;
}
static const struct of_device_id mpfs_sys_controller_of_match[] = {
diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c
index bd029e838241..bb0062c165fe 100644
--- a/drivers/soc/pxa/ssp.c
+++ b/drivers/soc/pxa/ssp.c
@@ -25,7 +25,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/spi/pxa2xx_spi.h>
+#include <linux/pxa2xx_ssp.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -152,11 +152,11 @@ static int pxa_ssp_probe(struct platform_device *pdev)
if (dev->of_node) {
const struct of_device_id *id =
of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
- ssp->type = (int) id->data;
+ ssp->type = (uintptr_t) id->data;
} else {
const struct platform_device_id *id =
platform_get_device_id(pdev);
- ssp->type = (int) id->driver_data;
+ ssp->type = id->driver_data;
/* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
* starts from 0, do a translation here
@@ -176,15 +176,13 @@ static int pxa_ssp_probe(struct platform_device *pdev)
return 0;
}
-static int pxa_ssp_remove(struct platform_device *pdev)
+static void pxa_ssp_remove(struct platform_device *pdev)
{
struct ssp_device *ssp = platform_get_drvdata(pdev);
mutex_lock(&ssp_lock);
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
-
- return 0;
}
static const struct platform_device_id ssp_id_table[] = {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 715348869d04..2caadbbcf830 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -26,22 +26,6 @@ config QCOM_COMMAND_DB
resource on a RPM-hardened platform must use this database to get
SoC specific identifier and information for the shared resources.
-config QCOM_CPR
- tristate "QCOM Core Power Reduction (CPR) support"
- depends on ARCH_QCOM && HAS_IOMEM
- select PM_OPP
- select REGMAP
- help
- Say Y here to enable support for the CPR hardware found on Qualcomm
- SoCs like QCS404.
-
- This driver populates CPU OPPs tables and makes adjustments to the
- tables based on feedback from the CPR hardware. If you want to do
- CPUfrequency scaling say Y here.
-
- To compile this driver as a module, choose M here: the module will
- be called qcom-cpr
-
config QCOM_GENI_SE
tristate "QCOM GENI Serial Engine Driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -88,11 +72,40 @@ config QCOM_OCMEM
requirements. This is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.
+config QCOM_PD_MAPPER
+ tristate "Qualcomm Protection Domain Mapper"
+ select QCOM_QMI_HELPERS
+ select QCOM_PDR_MSG
+ select AUXILIARY_BUS
+ depends on NET && QRTR && (ARCH_QCOM || COMPILE_TEST)
+ default QCOM_RPROC_COMMON
+ help
+ The Protection Domain Mapper maps registered services to the domains
+ and instances handled by the remote DSPs. This is a kernel-space
+ implementation of the service. It is a simpler alternative to the
+ userspace daemon.
+
config QCOM_PDR_HELPERS
tristate
select QCOM_QMI_HELPERS
+ select QCOM_PDR_MSG
depends on NET
+config QCOM_PDR_MSG
+ tristate
+
+config QCOM_PMIC_PDCHARGER_ULOG
+ tristate "Qualcomm PMIC PDCharger ULOG driver"
+ depends on RPMSG
+ depends on EVENT_TRACING
+ help
+ The Qualcomm PMIC PDCharger ULOG driver provides access to logs of
+ the ADSP firmware PDCharger module in charge of Battery and Power
+ Delivery on modern systems.
+
+ Say yes here to support PDCharger ULOG event tracing on modern
+ Qualcomm platforms.
+
config QCOM_PMIC_GLINK
tristate "Qualcomm PMIC GLINK driver"
depends on RPMSG
@@ -102,6 +115,7 @@ config QCOM_PMIC_GLINK
depends on OF
select AUXILIARY_BUS
select QCOM_PDR_HELPERS
+ select DRM_AUX_HPD_BRIDGE
help
The Qualcomm PMIC GLINK driver provides access, over GLINK, to the
USB and battery firmware running on one of the coprocessors in
@@ -125,7 +139,7 @@ config QCOM_RAMP_CTRL
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
select QCOM_SCM
help
The Qualcomm remote filesystem memory driver is used for allocating
@@ -157,27 +171,6 @@ config QCOM_RPMH
of hardware components aggregate requests for these resources and
help apply the aggregated state on the resource.
-config QCOM_RPMHPD
- tristate "Qualcomm RPMh Power domain driver"
- depends on QCOM_RPMH && QCOM_COMMAND_DB
- help
- QCOM RPMh Power domain driver to support power-domains with
- performance states. The driver communicates a performance state
- value to RPMh which then translates it into corresponding voltage
- for the voltage rail.
-
-config QCOM_RPMPD
- tristate "Qualcomm RPM Power domain driver"
- depends on PM && OF
- depends on QCOM_SMD_RPM
- select PM_GENERIC_DOMAINS
- select PM_GENERIC_DOMAINS_OF
- help
- QCOM RPM Power domain driver to support power-domains with
- performance states. The driver communicates a performance state
- value to RPM which then translates it into corresponding voltage
- for the voltage rail.
-
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM || COMPILE_TEST
@@ -218,6 +211,7 @@ config QCOM_SMP2P
config QCOM_SMSM
tristate "Qualcomm Shared Memory State Machine"
+ depends on MAILBOX
depends on QCOM_SMEM
select QCOM_SMEM_STATE
select IRQ_DOMAIN
@@ -246,6 +240,7 @@ config QCOM_STATS
tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver"
depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST
depends on QCOM_SMEM
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
help
Qualcomm Technologies, Inc. (QTI) Sleep stats driver to read
the shared memory exported by the remote processor related to
@@ -291,4 +286,21 @@ config QCOM_INLINE_CRYPTO_ENGINE
tristate
select QCOM_SCM
+config QCOM_PBS
+ tristate "PBS trigger support for Qualcomm Technologies, Inc. PMICS"
+ depends on SPMI
+ help
+ This driver supports configuring software programmable boot sequencer (PBS)
+ trigger events through PBS RAM on Qualcomm Technologies, Inc. PMICs.
+ This module provides APIs to client drivers that want to send PBS
+ trigger events to the PBS RAM.
+
endmenu
+
+config QCOM_UBWC_CONFIG
+ tristate
+ help
+ Most Qualcomm SoCs feature a number of Universal Bandwidth Compression
+ (UBWC) engines across various IP blocks, which need to be initialized
+ with coherent configuration data. This module functions as a single
+ source of truth for that information.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index bbca2e1e55bb..b7f1d2a57367 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,14 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_rpmh-rsc.o := -I$(src)
+CFLAGS_qcom_aoss.o := -I$(src)
obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
+obj-$(CONFIG_QCOM_PD_MAPPER) += qcom_pd_mapper.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
+obj-$(CONFIG_QCOM_PDR_MSG) += qcom_pdr_msg.o
obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o
obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o
+obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o
+CFLAGS_pmic_pdcharger_ulog.o := -I$(src)
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_RAMP_CTRL) += ramp_controller.o
@@ -20,6 +25,7 @@ qcom_rpmh-y += rpmh.o
obj-$(CONFIG_QCOM_SMD_RPM) += rpm-proc.o smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+CFLAGS_smp2p.o := -I$(src)
obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
@@ -32,3 +38,5 @@ obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
qcom_ice-objs += ice.o
obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o
+obj-$(CONFIG_QCOM_PBS) += qcom-pbs.o
+obj-$(CONFIG_QCOM_UBWC_CONFIG) += ubwc_config.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 30f81d6d9d9d..a956c407ce03 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -41,7 +41,7 @@ struct packet_router {
struct apr_rx_buf {
struct list_head node;
int len;
- uint8_t buf[];
+ uint8_t buf[] __counted_by(len);
};
/**
@@ -171,7 +171,7 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
return -EINVAL;
}
- abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC);
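+	/* struct_size() == sizeof(*abuf) + len for the u8 buf[], saturating on overflow */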
+ abuf = kzalloc(struct_size(abuf, buf, len), GFP_ATOMIC);
if (!abuf)
return -ENOMEM;
@@ -338,10 +338,10 @@ static void apr_rxwq(struct work_struct *work)
}
}
-static int apr_device_match(struct device *dev, struct device_driver *drv)
+static int apr_device_match(struct device *dev, const struct device_driver *drv)
{
struct apr_device *adev = to_apr_device(dev);
- struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_driver *adrv = to_apr_driver(drv);
const struct apr_device_id *id = adrv->id_table;
/* Attempt an OF style match first */
@@ -399,7 +399,7 @@ static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
}
-struct bus_type aprbus = {
+const struct bus_type aprbus = {
.name = "aprbus",
.match = apr_device_match,
.probe = apr_device_probe,
@@ -485,11 +485,10 @@ static int of_apr_add_pd_lookups(struct device *dev)
{
const char *service_name, *service_path;
struct packet_router *apr = dev_get_drvdata(dev);
- struct device_node *node;
struct pdr_service *pds;
int ret;
- for_each_child_of_node(dev->of_node, node) {
+ for_each_child_of_node_scoped(dev->of_node, node) {
ret = of_property_read_string_index(node, "qcom,protection-domain",
0, &service_name);
if (ret < 0)
@@ -499,14 +498,12 @@ static int of_apr_add_pd_lookups(struct device *dev)
1, &service_path);
if (ret < 0) {
dev_err(dev, "pdr service path missing: %d\n", ret);
- of_node_put(node);
return ret;
}
pds = pdr_add_lookup(apr->pdr, service_name, service_path);
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
- of_node_put(node);
return PTR_ERR(pds);
}
}
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 34c40368d5b5..ae66c2623d25 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,6 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,6 +21,8 @@
#define MAX_SLV_ID 8
#define SLAVE_ID_MASK 0x7
#define SLAVE_ID_SHIFT 16
+#define SLAVE_ID(addr) FIELD_GET(GENMASK(19, 16), addr)
+#define VRM_ADDR(addr) FIELD_GET(GENMASK(19, 4), addr)
/**
* struct entry_header: header for each entry in cmddb
@@ -133,7 +139,7 @@ int cmd_db_ready(void)
return 0;
}
-EXPORT_SYMBOL(cmd_db_ready);
+EXPORT_SYMBOL_GPL(cmd_db_ready);
static int cmd_db_get_header(const char *id, const struct entry_header **eh,
const struct rsc_hdr **rh)
@@ -147,12 +153,7 @@ static int cmd_db_get_header(const char *id, const struct entry_header **eh,
if (ret)
return ret;
- /*
- * Pad out query string to same length as in DB. NOTE: the output
- * query string is not necessarily '\0' terminated if it bumps up
- * against the max size. That's OK and expected.
- */
- strncpy(query, id, sizeof(query));
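+	/* Copy id into the fixed-size query, zero-padding the tail; no NUL is added if id fills it */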
+ strtomem_pad(query, id, 0);
for (i = 0; i < MAX_SLV_ID; i++) {
rsc_hdr = &cmd_db_header->header[i];
@@ -193,7 +194,7 @@ u32 cmd_db_read_addr(const char *id)
return ret < 0 ? 0 : le32_to_cpu(ent->addr);
}
-EXPORT_SYMBOL(cmd_db_read_addr);
+EXPORT_SYMBOL_GPL(cmd_db_read_addr);
/**
* cmd_db_read_aux_data() - Query command db for aux data.
@@ -218,7 +219,31 @@ const void *cmd_db_read_aux_data(const char *id, size_t *len)
return rsc_offset(rsc_hdr, ent);
}
-EXPORT_SYMBOL(cmd_db_read_aux_data);
+EXPORT_SYMBOL_GPL(cmd_db_read_aux_data);
+
+/**
+ * cmd_db_match_resource_addr() - Check whether two resource addresses match
+ *
+ * @addr1: Resource address to compare
+ * @addr2: Resource address to compare
+ *
+ * Return: true if two addresses refer to the same resource, false otherwise
+ */
+bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
+{
+ /*
+ * Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte
+ * aligned addresses associated with it. Ignore the offset to check
+ * for VRM requests.
+ */
+ if (addr1 == addr2)
+ return true;
+ else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr);
/**
* cmd_db_read_slave_id - Get the slave ID for a given resource address
@@ -240,7 +265,7 @@ enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
addr = le32_to_cpu(ent->addr);
return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
}
-EXPORT_SYMBOL(cmd_db_read_slave_id);
+EXPORT_SYMBOL_GPL(cmd_db_read_slave_id);
#ifdef CONFIG_DEBUG_FS
static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
@@ -324,7 +349,7 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WC);
if (!cmd_db_header) {
ret = -ENOMEM;
cmd_db_header = NULL;
@@ -362,7 +387,7 @@ static int __init cmd_db_device_init(void)
{
return platform_driver_register(&cmd_db_dev_driver);
}
-arch_initcall(cmd_db_device_init);
+core_initcall(cmd_db_device_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
MODULE_LICENSE("GPL v2");
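For reference, the address comparison added above in cmd_db_match_resource_addr() can be illustrated with plain shifts and masks. The sketch below is not part of the patch; EX_CMD_DB_HW_VRM is assumed to be 4, the VRM entry of enum cmd_db_hw_type (check include/soc/qcom/cmd-db.h for the authoritative value), and the field positions mirror the SLAVE_ID()/VRM_ADDR() macros from the hunk above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CMD_DB_HW_VRM	4u				/* assumed VRM slave ID */
#define EX_SLAVE_ID(addr)	(((addr) >> 16) & 0xfu)		/* GENMASK(19, 16) */
#define EX_VRM_ADDR(addr)	(((addr) >> 4) & 0xffffu)	/* GENMASK(19, 4) */

/* Same logic as cmd_db_match_resource_addr(), written out explicitly */
static bool ex_match_resource_addr(uint32_t addr1, uint32_t addr2)
{
	if (addr1 == addr2)
		return true;
	/* VRM resources span 3-4 contiguous 4-byte registers; ignore the offset */
	return EX_SLAVE_ID(addr1) == EX_CMD_DB_HW_VRM &&
	       EX_VRM_ADDR(addr1) == EX_VRM_ADDR(addr2);
}

int main(void)
{
	/* Same VRM resource, different 4-byte offsets: matches */
	printf("%d\n", ex_match_resource_addr(0x40120, 0x40128));	/* 1 */
	/* Non-VRM slave ID: only an exact match counts */
	printf("%d\n", ex_match_resource_addr(0x30120, 0x30128));	/* 0 */
	return 0;
}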
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index adf2d523f103..597f9025e422 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -17,6 +17,8 @@
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
+#define CREATE_TRACE_POINTS
+#include "trace_icc-bwmon.h"
/*
* The BWMON samples data throughput within 'sample_ms' time. With three
@@ -282,7 +284,7 @@ static const struct regmap_config msm8998_bwmon_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config msm8998_bwmon_global_regmap_cfg = {
@@ -301,7 +303,7 @@ static const struct regmap_config msm8998_bwmon_global_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct reg_field sdm845_cpu_bwmon_reg_fields[] = {
@@ -369,7 +371,7 @@ static const struct regmap_config sdm845_cpu_bwmon_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/* BWMON v5 */
@@ -446,7 +448,7 @@ static const struct regmap_config sdm845_llcc_bwmon_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all)
@@ -565,7 +567,7 @@ static void bwmon_start(struct icc_bwmon *bwmon)
int window;
/* No need to check for errors, as this must have succeeded before. */
- dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0);
+ dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0));
bwmon_clear_counters(bwmon, true);
@@ -645,14 +647,18 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
struct icc_bwmon *bwmon = dev_id;
unsigned int irq_enable = 0;
struct dev_pm_opp *opp, *target_opp;
- unsigned int bw_kbps, up_kbps, down_kbps;
+ unsigned int bw_kbps, up_kbps, down_kbps, meas_kbps;
bw_kbps = bwmon->target_kbps;
+ meas_kbps = bwmon->target_kbps;
target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(target_opp))
+ return IRQ_HANDLED;
+
bwmon->target_kbps = bw_kbps;
bw_kbps--;
@@ -679,6 +685,7 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
bwmon_clear_irq(bwmon);
bwmon_enable(bwmon, irq_enable);
+ trace_qcom_bwmon_update(dev_name(bwmon->dev), meas_kbps, up_kbps, down_kbps);
if (bwmon->target_kbps == bwmon->current_kbps)
goto out;
@@ -772,18 +779,25 @@ static int bwmon_probe(struct platform_device *pdev)
opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
if (IS_ERR(opp))
return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
+ dev_pm_opp_put(opp);
bwmon->min_bw_kbps = 0;
opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
if (IS_ERR(opp))
return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
+ dev_pm_opp_put(opp);
bwmon->dev = dev;
bwmon_disable(bwmon);
- ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr,
- bwmon_intr_thread,
- IRQF_ONESHOT, dev_name(dev), bwmon);
+
+ /*
+ * SoCs with multiple cpu-bwmon instances can end up using a shared interrupt
+ * line. Using the devm_ variant might result in the IRQ handler being executed
+ * after bwmon_disable() in bwmon_remove().
+ */
+ ret = request_threaded_irq(bwmon->irq, bwmon_intr, bwmon_intr_thread,
+ IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), bwmon);
if (ret)
return dev_err_probe(dev, ret, "failed to request IRQ\n");
@@ -793,13 +807,12 @@ static int bwmon_probe(struct platform_device *pdev)
return 0;
}
-static int bwmon_remove(struct platform_device *pdev)
+static void bwmon_remove(struct platform_device *pdev)
{
struct icc_bwmon *bwmon = platform_get_drvdata(pdev);
bwmon_disable(bwmon);
-
- return 0;
+ free_irq(bwmon->irq, bwmon);
}
static const struct icc_bwmon_data msm8998_bwmon_data = {
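The icc-bwmon hunks above pair every dev_pm_opp_find_bw_ceil()/dev_pm_opp_find_bw_floor() lookup with dev_pm_opp_put(), because those helpers return an OPP with its reference count raised. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

/*
 * Illustration only, not part of the patch: round a requested bandwidth
 * up to the nearest OPP and immediately drop the reference taken by the
 * lookup, matching the dev_pm_opp_put() calls added above.
 */
static int example_round_bw_to_opp(struct device *dev, unsigned int *bw_kbps)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_bw_ceil(dev, bw_kbps, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* *bw_kbps now holds the peak bandwidth of the matched OPP */
	dev_pm_opp_put(opp);

	return 0;
}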
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index fbab7fe5c652..b203bc685cad 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -8,8 +8,10 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -19,20 +21,76 @@
#include <soc/qcom/ice.h>
-#define AES_256_XTS_KEY_SIZE 64
+#define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */
+
+#define QCOM_ICE_HWKM_V1 1 /* HWKM version 1 */
+#define QCOM_ICE_HWKM_V2 2 /* HWKM version 2 */
+
+#define QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE 100 /* Maximum HWKM wrapped key size */
+
+/*
+ * Wrapped key size depends upon HWKM version:
+ * HWKM version 1 supports 68 bytes
+ * HWKM version 2 supports 100 bytes
+ */
+#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(v) ((v) == QCOM_ICE_HWKM_V1 ? 68 : 100)
/* QCOM ICE registers */
+
+#define QCOM_ICE_REG_CONTROL 0x0000
+#define QCOM_ICE_LEGACY_MODE_ENABLED BIT(0)
+
#define QCOM_ICE_REG_VERSION 0x0008
+
#define QCOM_ICE_REG_FUSE_SETTING 0x0010
+#define QCOM_ICE_FUSE_SETTING_MASK BIT(0)
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK BIT(1)
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK BIT(2)
+
#define QCOM_ICE_REG_BIST_STATUS 0x0070
+#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+
#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
-/* BIST ("built-in self-test") status flags */
-#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+#define QCOM_ICE_REG_CRYPTOCFG_BASE 0x4040
+#define QCOM_ICE_REG_CRYPTOCFG_SIZE 0x80
+#define QCOM_ICE_REG_CRYPTOCFG(slot) (QCOM_ICE_REG_CRYPTOCFG_BASE + \
+ QCOM_ICE_REG_CRYPTOCFG_SIZE * (slot))
+union crypto_cfg {
+ __le32 regval;
+ struct {
+ u8 dusize;
+ u8 capidx;
+ u8 reserved;
+#define QCOM_ICE_HWKM_CFG_ENABLE_VAL BIT(7)
+ u8 cfge;
+ };
+};
+
+/* QCOM ICE HWKM (Hardware Key Manager) registers */
+
+#define HWKM_OFFSET 0x8000
-#define QCOM_ICE_FUSE_SETTING_MASK 0x1
-#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
-#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+#define QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000)
+#define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2))
+/* In HWKM v1 the ICE legacy mode is controlled from HWKM register space */
+#define QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED BIT(5)
+
+#define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004)
+#define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0)
+#define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1)
+#define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2)
+#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(14) : BIT(7))
+#define QCOM_ICE_HWKM_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(16) : BIT(9))
+
+#define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008)
+#define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3)
+
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_0 (HWKM_OFFSET + 0x5000)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_1 (HWKM_OFFSET + 0x5004)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_2 (HWKM_OFFSET + 0x5008)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_3 (HWKM_OFFSET + 0x500C)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_4 (HWKM_OFFSET + 0x5010)
#define qcom_ice_writel(engine, val, reg) \
writel((val), (engine)->base + (reg))
@@ -40,12 +98,19 @@
#define qcom_ice_readl(engine, reg) \
readl((engine)->base + (reg))
+static bool qcom_ice_use_wrapped_keys;
+module_param_named(use_wrapped_keys, qcom_ice_use_wrapped_keys, bool, 0660);
+MODULE_PARM_DESC(use_wrapped_keys,
+ "Support wrapped keys instead of raw keys, if available on the platform");
+
struct qcom_ice {
struct device *dev;
void __iomem *base;
- struct device_link *link;
struct clk *core_clk;
+ bool use_hwkm;
+ bool hwkm_init_complete;
+ u8 hwkm_version;
};
static bool qcom_ice_check_supported(struct qcom_ice *ice)
@@ -63,9 +128,24 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice)
return false;
}
+ /* HWKM version v2 is present from ICE 3.2.1 onwards while version v1
+	 * is present only in ICE 3.2.0. Earlier ICE versions don't have HWKM.
+ */
+ if (major > 3 ||
+ (major == 3 && (minor >= 3 || (minor == 2 && step >= 1))))
+ ice->hwkm_version = QCOM_ICE_HWKM_V2;
+ else if ((major == 3) && (minor == 2))
+ ice->hwkm_version = QCOM_ICE_HWKM_V1;
+ else
+ ice->hwkm_version = 0;
+
dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
major, minor, step);
+ if (ice->hwkm_version)
+ dev_info(dev, "QC Hardware Key Manager (HWKM) version v%d\n",
+ ice->hwkm_version);
+
/* If fuses are blown, ICE might not work in the standard way. */
regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING);
if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
@@ -75,6 +155,34 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice)
return false;
}
+ /*
+ * Check for HWKM support and decide whether to use it or not. ICE
+ * v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE
+ * versions don't have HWKM at all. However, for HWKM to be fully
+ * usable by Linux, the TrustZone software also needs to support certain
+ * SCM calls including the ones to generate and prepare keys. Support
+ * for these SCM calls is present for SoCs with HWKM v2 and is being
+ * added for SoCs with HWKM v1 as well but not every SoC with HWKM v1
+ * currently supports this. So, this driver checks for the SCM call
+ * support before it decides to use HWKM.
+ *
+ * Also, since HWKM and legacy mode are mutually exclusive, and
+ * ICE-capable storage driver(s) need to know early on whether to
+ * advertise support for raw keys or wrapped keys, HWKM cannot be used
+ * unconditionally. A module parameter is used to opt into using it.
+ */
+ if (ice->hwkm_version && qcom_scm_has_wrapped_key_support()) {
+ if (qcom_ice_use_wrapped_keys) {
+ dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n");
+ ice->use_hwkm = true;
+ } else {
+ dev_info(dev, "Not using HWKM. Supporting raw keys only.\n");
+ }
+ } else if (qcom_ice_use_wrapped_keys) {
+ dev_warn(dev, "A supported HWKM is not present. Ignoring qcom_ice.use_wrapped_keys=1.\n");
+ } else {
+ dev_info(dev, "A supported HWKM is not present. Supporting raw keys only.\n");
+ }
return true;
}
@@ -122,17 +230,77 @@ static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS,
regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
50, 5000);
- if (err)
+ if (err) {
dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n");
+ return err;
+ }
- return err;
+ if (ice->use_hwkm &&
+ qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_STATUS) !=
+ (QCOM_ICE_HWKM_KT_CLEAR_DONE |
+ QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE |
+ QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE |
+ QCOM_ICE_HWKM_CRYPTO_BIST_DONE(ice->hwkm_version) |
+ QCOM_ICE_HWKM_BIST_DONE(ice->hwkm_version))) {
+ dev_err(ice->dev, "HWKM self-test error!\n");
+ /*
+ * Too late to revoke use_hwkm here, as it was already
+ * propagated up the stack into the crypto capabilities.
+ */
+ }
+ return 0;
+}
+
+static void qcom_ice_hwkm_init(struct qcom_ice *ice)
+{
+ u32 regval;
+
+ if (!ice->use_hwkm)
+ return;
+
+ BUILD_BUG_ON(QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE >
+ BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE);
+ /*
+ * When ICE is in HWKM mode, it only supports wrapped keys.
+ * When ICE is in legacy mode, it only supports raw keys.
+ *
+ * Put ICE in HWKM mode. ICE defaults to legacy mode.
+ */
+ if (ice->hwkm_version == QCOM_ICE_HWKM_V2) {
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL);
+ regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL);
+ } else if (ice->hwkm_version == QCOM_ICE_HWKM_V1) {
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_CTL);
+ regval &= ~QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_HWKM_TZ_KM_CTL);
+ }
+
+ /* Disable CRC checks. This HWKM feature is not used. */
+ qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL,
+ QCOM_ICE_REG_HWKM_TZ_KM_CTL);
+
+ /*
+ * Allow the HWKM slave to read and write the keyslots in the ICE HWKM
+ * slave. Without this, TrustZone cannot program keys into ICE.
+ */
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_0);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_1);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_2);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_3);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_4);
+
+ /* Clear the HWKM response FIFO. */
+ qcom_ice_writel(ice, QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL,
+ QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS);
+ ice->hwkm_init_complete = true;
}
int qcom_ice_enable(struct qcom_ice *ice)
{
qcom_ice_low_power_mode_enable(ice);
qcom_ice_optimization_enable(ice);
-
+ qcom_ice_hwkm_init(ice);
return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_enable);
@@ -148,7 +316,7 @@ int qcom_ice_resume(struct qcom_ice *ice)
err);
return err;
}
-
+ qcom_ice_hwkm_init(ice);
return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_resume);
@@ -156,15 +324,58 @@ EXPORT_SYMBOL_GPL(qcom_ice_resume);
int qcom_ice_suspend(struct qcom_ice *ice)
{
clk_disable_unprepare(ice->core_clk);
+ ice->hwkm_init_complete = false;
return 0;
}
EXPORT_SYMBOL_GPL(qcom_ice_suspend);
-int qcom_ice_program_key(struct qcom_ice *ice,
- u8 algorithm_id, u8 key_size,
- const u8 crypto_key[], u8 data_unit_size,
- int slot)
+static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot)
+{
+ return ice->hwkm_version == QCOM_ICE_HWKM_V1 ? slot : slot * 2;
+}
+
+static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot,
+ const struct blk_crypto_key *bkey)
+{
+ struct device *dev = ice->dev;
+ union crypto_cfg cfg = {
+ .dusize = bkey->crypto_cfg.data_unit_size / 512,
+ .capidx = QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+ .cfge = QCOM_ICE_HWKM_CFG_ENABLE_VAL,
+ };
+ int err;
+
+ if (!ice->use_hwkm) {
+ dev_err_ratelimited(dev, "Got wrapped key when not using HWKM\n");
+ return -EINVAL;
+ }
+ if (!ice->hwkm_init_complete) {
+ dev_err_ratelimited(dev, "HWKM not yet initialized\n");
+ return -EINVAL;
+ }
+
+ /* Clear CFGE before programming the key. */
+ qcom_ice_writel(ice, 0x0, QCOM_ICE_REG_CRYPTOCFG(slot));
+
+ /* Call into TrustZone to program the wrapped key using HWKM. */
+ err = qcom_scm_ice_set_key(translate_hwkm_slot(ice, slot), bkey->bytes,
+ bkey->size, cfg.capidx, cfg.dusize);
+ if (err) {
+ dev_err_ratelimited(dev,
+ "qcom_scm_ice_set_key failed; err=%d, slot=%u\n",
+ err, slot);
+ return err;
+ }
+
+ /* Set CFGE after programming the key. */
+ qcom_ice_writel(ice, le32_to_cpu(cfg.regval),
+ QCOM_ICE_REG_CRYPTOCFG(slot));
+ return 0;
+}
+
+int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot,
+ const struct blk_crypto_key *blk_key)
{
struct device *dev = ice->dev;
union {
@@ -175,15 +386,26 @@ int qcom_ice_program_key(struct qcom_ice *ice,
int err;
/* Only AES-256-XTS has been tested so far. */
- if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS ||
- key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) {
- dev_err_ratelimited(dev,
- "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
- algorithm_id, key_size);
+ if (blk_key->crypto_cfg.crypto_mode !=
+ BLK_ENCRYPTION_MODE_AES_256_XTS) {
+ dev_err_ratelimited(dev, "Unsupported crypto mode: %d\n",
+ blk_key->crypto_cfg.crypto_mode);
return -EINVAL;
}
- memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE);
+ if (blk_key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
+ return qcom_ice_program_wrapped_key(ice, slot, blk_key);
+
+ if (ice->use_hwkm) {
+ dev_err_ratelimited(dev, "Got raw key when using HWKM\n");
+ return -EINVAL;
+ }
+
+ if (blk_key->size != AES_256_XTS_KEY_SIZE) {
+ dev_err_ratelimited(dev, "Incorrect key size\n");
+ return -EINVAL;
+ }
+ memcpy(key.bytes, blk_key->bytes, AES_256_XTS_KEY_SIZE);
/* The SCM call requires that the key words are encoded in big endian */
for (i = 0; i < ARRAY_SIZE(key.words); i++)
@@ -191,7 +413,7 @@ int qcom_ice_program_key(struct qcom_ice *ice,
err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
QCOM_SCM_ICE_CIPHER_AES_256_XTS,
- data_unit_size);
+ blk_key->crypto_cfg.data_unit_size / 512);
memzero_explicit(&key, sizeof(key));
@@ -201,10 +423,132 @@ EXPORT_SYMBOL_GPL(qcom_ice_program_key);
int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
{
+ if (ice->hwkm_init_complete)
+ slot = translate_hwkm_slot(ice, slot);
return qcom_scm_ice_invalidate_key(slot);
}
EXPORT_SYMBOL_GPL(qcom_ice_evict_key);
+/**
+ * qcom_ice_get_supported_key_type() - Get the supported key type
+ * @ice: ICE driver data
+ *
+ * Return: the blk-crypto key type that the ICE driver is configured to use.
+ * This is the key type that ICE-capable storage drivers should advertise as
+ * supported in the crypto capabilities of any disks they register.
+ */
+enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice)
+{
+ if (ice->use_hwkm)
+ return BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
+ return BLK_CRYPTO_KEY_TYPE_RAW;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_get_supported_key_type);
+
+/**
+ * qcom_ice_derive_sw_secret() - Derive software secret from wrapped key
+ * @ice: ICE driver data
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ *
+ * Use HWKM to derive the "software secret" from a hardware-wrapped key that is
+ * given in ephemerally-wrapped form.
+ *
+ * Return: 0 on success; -EBADMSG if the given ephemerally-wrapped key is
+ * invalid; or another -errno value.
+ */
+int qcom_ice_derive_sw_secret(struct qcom_ice *ice,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ int err = qcom_scm_derive_sw_secret(eph_key, eph_key_size,
+ sw_secret,
+ BLK_CRYPTO_SW_SECRET_SIZE);
+ if (err == -EIO || err == -EINVAL)
+ err = -EBADMSG; /* probably invalid key */
+ return err;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_derive_sw_secret);
+
+/**
+ * qcom_ice_generate_key() - Generate a wrapped key for inline encryption
+ * @ice: ICE driver data
+ * @lt_key: output buffer for the long-term wrapped key
+ *
+ * Use HWKM to generate a new key and return it as a long-term wrapped key.
+ *
+ * Return: the size of the resulting wrapped key on success; -errno on failure.
+ */
+int qcom_ice_generate_key(struct qcom_ice *ice,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_generate_ice_key(lt_key,
+ QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version));
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_generate_key);
+
+/**
+ * qcom_ice_prepare_key() - Prepare a wrapped key for inline encryption
+ * @ice: ICE driver data
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ *
+ * Use HWKM to re-wrap a long-term wrapped key with the per-boot ephemeral key.
+ *
+ * Return: the size of the resulting wrapped key on success; -EBADMSG if the
+ * given long-term wrapped key is invalid; or another -errno value.
+ */
+int qcom_ice_prepare_key(struct qcom_ice *ice,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_prepare_ice_key(lt_key, lt_key_size,
+ eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version));
+ if (err == -EIO || err == -EINVAL)
+ err = -EBADMSG; /* probably invalid key */
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_prepare_key);
+
+/**
+ * qcom_ice_import_key() - Import a raw key for inline encryption
+ * @ice: ICE driver data
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ *
+ * Use HWKM to import a raw key and return it as a long-term wrapped key.
+ *
+ * Return: the size of the resulting wrapped key on success; -errno on failure.
+ */
+int qcom_ice_import_key(struct qcom_ice *ice,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_import_ice_key(raw_key, raw_key_size,
+ lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version));
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_import_key);
+
static struct qcom_ice *qcom_ice_create(struct device *dev,
void __iomem *base)
{
@@ -261,13 +605,13 @@ static struct qcom_ice *qcom_ice_create(struct device *dev,
* Return: ICE pointer on success, NULL if there is no ICE data provided by the
* consumer or ERR_PTR() on error.
*/
-struct qcom_ice *of_qcom_ice_get(struct device *dev)
+static struct qcom_ice *of_qcom_ice_get(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct qcom_ice *ice;
- struct device_node *node;
struct resource *res;
void __iomem *base;
+ struct device_link *link;
if (!dev || !dev->of_node)
return ERR_PTR(-ENODEV);
@@ -292,15 +636,15 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
* (legacy DT binding), then it must at least provide a phandle
* to the ICE devicetree node, otherwise ICE is not supported.
*/
- node = of_parse_phandle(dev->of_node, "qcom,ice", 0);
+ struct device_node *node __free(device_node) = of_parse_phandle(dev->of_node,
+ "qcom,ice", 0);
if (!node)
return NULL;
pdev = of_find_device_by_node(node);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", node->name);
- ice = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ return ERR_PTR(-EPROBE_DEFER);
}
ice = platform_get_drvdata(pdev);
@@ -308,12 +652,11 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
dev_err(dev, "Cannot get ice instance from %s\n",
dev_name(&pdev->dev));
platform_device_put(pdev);
- ice = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ return ERR_PTR(-EPROBE_DEFER);
}
- ice->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
- if (!ice->link) {
+ link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!link) {
dev_err(&pdev->dev,
"Failed to create device link to consumer %s\n",
dev_name(dev));
@@ -321,12 +664,55 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
ice = ERR_PTR(-EINVAL);
}
-out:
- of_node_put(node);
+ return ice;
+}
+
+static void qcom_ice_put(const struct qcom_ice *ice)
+{
+ struct platform_device *pdev = to_platform_device(ice->dev);
+
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
+ platform_device_put(pdev);
+}
+
+static void devm_of_qcom_ice_put(struct device *dev, void *res)
+{
+ qcom_ice_put(*(struct qcom_ice **)res);
+}
+
+/**
+ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
+ * a DT node.
+ * @dev: device pointer for the consumer device.
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device, when its DT node provides the 'ice' reg range and the
+ * 'ice' clock (legacy DT style), or, when the consumer points at an ICE DT
+ * node through the 'qcom,ice' phandle property, by returning the ICE
+ * instance that has already been created for that node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
+ * consumer or ERR_PTR() on error.
+ */
+struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
+{
+ struct qcom_ice *ice, **dr;
+
+ dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ice = of_qcom_ice_get(dev);
+ if (!IS_ERR_OR_NULL(ice)) {
+ *dr = ice;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
return ice;
}
-EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
static int qcom_ice_probe(struct platform_device *pdev)
{
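The exported devm_of_qcom_ice_get() and qcom_ice_get_supported_key_type() helpers above are what ICE-capable storage drivers call to pick up their ICE instance and decide which blk-crypto key type to advertise. A minimal consumer sketch; the function name and log messages are hypothetical, not from this patch:

#include <linux/blk-crypto.h>
#include <linux/device.h>
#include <linux/err.h>
#include <soc/qcom/ice.h>

/*
 * Illustration only: fetch the ICE instance described by the consumer's
 * DT node and report the supported key type.  Wrapped (HWKM) keys and
 * raw keys are mutually exclusive, so exactly one can be advertised.
 */
static int example_host_init_ice(struct device *dev)
{
	struct qcom_ice *ice;

	ice = devm_of_qcom_ice_get(dev);
	if (IS_ERR(ice))
		return PTR_ERR(ice);	/* may be -EPROBE_DEFER */
	if (!ice)
		return 0;		/* no ICE described in DT */

	if (qcom_ice_get_supported_key_type(ice) == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
		dev_info(dev, "ICE supports hardware-wrapped keys\n");
	else
		dev_info(dev, "ICE supports raw keys\n");

	return 0;
}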
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
index 7886af4fd726..50cd710c5e82 100644
--- a/drivers/soc/qcom/kryo-l2-accessors.c
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -32,7 +32,7 @@ void kryo_l2_set_indirect_reg(u64 reg, u64 val)
isb();
raw_spin_unlock_irqrestore(&l2_access_lock, flags);
}
-EXPORT_SYMBOL(kryo_l2_set_indirect_reg);
+EXPORT_SYMBOL_GPL(kryo_l2_set_indirect_reg);
/**
* kryo_l2_get_indirect_reg() - read an L2 register value
@@ -54,4 +54,4 @@ u64 kryo_l2_get_indirect_reg(u64 reg)
return val;
}
-EXPORT_SYMBOL(kryo_l2_get_indirect_reg);
+EXPORT_SYMBOL_GPL(kryo_l2_get_indirect_reg);
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index e32a4161a8d0..13e174267294 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -7,11 +7,13 @@
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
@@ -25,14 +27,19 @@
#define ACT_CTRL_OPCODE_ACTIVATE BIT(0)
#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1)
#define ACT_CTRL_ACT_TRIG BIT(0)
-#define ACT_CTRL_OPCODE_SHIFT 0x01
-#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
-#define ATTR1_FIXED_SIZE_SHIFT 0x03
-#define ATTR1_PRIORITY_SHIFT 0x04
-#define ATTR1_MAX_CAP_SHIFT 0x10
+#define ACT_CTRL_OPCODE_SHIFT 1
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 2
+#define ATTR1_FIXED_SIZE_SHIFT 3
+#define ATTR1_PRIORITY_SHIFT 4
+#define ATTR1_MAX_CAP_SHIFT 16
#define ATTR0_RES_WAYS_MASK GENMASK(15, 0)
#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16)
-#define ATTR0_BONUS_WAYS_SHIFT 0x10
+#define ATTR0_BONUS_WAYS_SHIFT 16
+#define ATTR2_PROBE_TARGET_WAYS_MASK BIT(4)
+#define ATTR2_FIXED_SIZE_MASK BIT(8)
+#define ATTR2_PRIORITY_MASK GENMASK(14, 12)
+#define ATTR2_PARENT_SCID_MASK GENMASK(21, 16)
+#define ATTR2_IN_A_GROUP_MASK BIT(24)
#define LLCC_STATUS_READ_DELAY 100
#define CACHE_LINE_SIZE_SHIFT 6
@@ -46,7 +53,11 @@
#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
-#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_8 * n)
+#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n)
+#define LLCC_V6_TRP_ATTR0_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR0_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR1_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR1_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR2_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR2_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR3_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR3_CFG] + SZ_64 * (n))
#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
#define LLCC_TRP_PCB_ACT 0x21f04
@@ -64,6 +75,7 @@
#define LLCC_VERSION_2_0_0_0 0x02000000
#define LLCC_VERSION_2_1_0_0 0x02010000
#define LLCC_VERSION_4_1_0_0 0x04010000
+#define LLCC_VERSION_6_0_0_0 0x06000000
/**
* struct llcc_slice_config - Data associated with the llcc slice
@@ -91,6 +103,20 @@
* @write_scid_en: Bit enables write cache support for a given scid.
* @write_scid_cacheable_en: Enables write cache cacheable support for a
* given scid (not supported on v2 or older hardware).
+ * @stale_en: Bit enables stale.
+ * @stale_cap_en: Bit enables stale only if current scid is over-cap.
+ * @mru_uncap_en: Roll-over on reserved cache ways if current scid is
+ * under-cap.
+ * @mru_rollover: Roll-over on reserved cache ways.
+ * @alloc_oneway_en: Always allocate one way on over-cap even if there's no
+ * same-scid lines for replacement.
+ * @ovcap_en: Once current scid is over-capacity, allocate other over-cap SCID.
+ * @ovcap_prio: Once current scid is over-capacity, allocate other low priority
+ * over-cap scid. Depends on corresponding bit being set in
+ * ovcap_en.
+ * @vict_prio: When current scid is under-capacity, allocate over other
+ * lower-than victim priority-line threshold scid.
+ * @parent_slice_id: For grouped slices, specifies the slice id of the parent.
*/
struct llcc_slice_config {
u32 usecase_id;
@@ -115,245 +141,3612 @@ struct llcc_slice_config {
bool ovcap_en;
bool ovcap_prio;
bool vict_prio;
+ u32 parent_slice_id;
};
struct qcom_llcc_config {
const struct llcc_slice_config *sct_data;
const u32 *reg_offset;
const struct llcc_edac_reg_offset *edac_reg_offset;
+ u32 max_cap_shift; /* instead of ATTR1_MAX_CAP_SHIFT */
+ u32 num_banks;
int size;
- bool need_llcc_cfg;
+ bool skip_llcc_cfg;
bool no_edac;
+ bool irq_configured;
+ bool no_broadcast_register;
+};
+
+struct qcom_sct_config {
+ const struct qcom_llcc_config *llcc_config;
+ int num_config;
};
enum llcc_reg_offset {
LLCC_COMMON_HW_INFO,
LLCC_COMMON_STATUS0,
+ LLCC_TRP_ATTR0_CFG,
+ LLCC_TRP_ATTR1_CFG,
+ LLCC_TRP_ATTR2_CFG,
+ LLCC_TRP_ATTR3_CFG,
+ LLCC_TRP_SID_DIS_CAP_ALLOC,
+ LLCC_TRP_ALGO_STALE_EN,
+ LLCC_TRP_ALGO_STALE_CAP_EN,
+ LLCC_TRP_ALGO_MRU0,
+ LLCC_TRP_ALGO_MRU1,
+ LLCC_TRP_ALGO_ALLOC0,
+ LLCC_TRP_ALGO_ALLOC1,
+ LLCC_TRP_ALGO_ALLOC2,
+ LLCC_TRP_ALGO_ALLOC3,
+ LLCC_TRP_WRS_EN,
+ LLCC_TRP_WRS_CACHEABLE_EN,
+};
+
+static const struct llcc_slice_config ipq5424_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xFFFF,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ .alloc_oneway_en = true,
+ .ovcap_en = true,
+ .ovcap_prio = true,
+ .vict_prio = true,
+ },
+ {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 256,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xF000,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ },
+};
+
+static const struct llcc_slice_config kaanapali_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 35,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 5,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 34,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 5632,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .write_scid_cacheable_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 7168,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 256,
+ .priority = 5,
+ .bonus_ways = 0xfffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 800,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ .mru_uncap_en = true,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 19,
+ .max_cap = 512,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CPUMTE,
+ .slice_id = 7,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 15,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .mru_uncap_en = true,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 7936,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0x7fffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_VIDDEC,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMOFE,
+ .slice_id = 33,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTIP,
+ .slice_id = 13,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTRF,
+ .slice_id = 10,
+ .max_cap = 3584,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTRF,
+ .slice_id = 21,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_VIDEO_APV,
+ .slice_id = 6,
+ .max_cap = 768,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_COMPUTE1,
+ .slice_id = 22,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CPUSS_OPP,
+ .slice_id = 32,
+ .max_cap = 0,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CPUSSMPAM,
+ .slice_id = 17,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .stale_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_CAM_IPE_STROV,
+ .slice_id = 14,
+ .max_cap = 400,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAM_OFE_STROV,
+ .slice_id = 20,
+ .max_cap = 400,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CPUSS_HEU,
+ .slice_id = 28,
+ .max_cap = 0,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0,
+ .mru_uncap_en = true,
+ .ovcap_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MDM_PNG_FIXED,
+ .slice_id = 26,
+ .max_cap = 256,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xff000000,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ .mru_uncap_en = true,
+ .vict_prio = true,
+ },
+};
+
+static const struct llcc_slice_config sa8775p_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 2048,
+ .priority = 1,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 4096,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 3072,
+ .priority = 1,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
+};
+
+static const struct llcc_slice_config sar1130p_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 4096,
+ .priority = 1,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 3072,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 12800,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 26,
+ .max_cap = 2048,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x3,
+ .cache_mode = true,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 30,
+ .max_cap = 3072,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x1fff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP_LEFT,
+ .slice_id = 17,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP_RIGHT,
+ .slice_id = 18,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_EVCS_LEFT,
+ .slice_id = 22,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_EVCS_RIGHT,
+ .slice_id = 23,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
+};
+
+static const struct llcc_slice_config sar2130p_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = 0,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 128,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 1536,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 26,
+ .max_cap = 2048,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x3,
+ .cache_mode = true,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIEYE,
+ .slice_id = 7,
+ .max_cap = 7168,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDPTH,
+ .slice_id = 8,
+ .max_cap = 7168,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUMV,
+ .slice_id = 9,
+ .max_cap = 2048,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_EVA_LEFT,
+ .slice_id = 20,
+ .max_cap = 7168,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0x3ffffffc,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_EVA_RIGHT,
+ .slice_id = 21,
+ .max_cap = 7168,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0x3ffffffc,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_EVAGAIN,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 30,
+ .max_cap = 3072,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIPTH,
+ .slice_id = 29,
+ .max_cap = 1024,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0x3fffffff,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP_LEFT,
+ .slice_id = 17,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP_RIGHT,
+ .slice_id = 18,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_EVCS_LEFT,
+ .slice_id = 22,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_EVCS_RIGHT,
+ .slice_id = 23,
+ .max_cap = 0,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_SPAD,
+ .slice_id = 24,
+ .max_cap = 7168,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x0,
+ .res_ways = 0x0,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
};
static const struct llcc_slice_config sc7180_data[] = {
- { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 256,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
};
static const struct llcc_slice_config sc7280_data[] = {
- { LLCC_CPUSS, 1, 768, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
- { LLCC_MDMHPGRW, 7, 512, 2, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0},
- { LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 768,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
};
static const struct llcc_slice_config sc8180x_data[] = {
- { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_VIDSC1, 3, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPGRW, 7, 3072, 1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 },
- { LLCC_MDM, 8, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 5120, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1 },
- { LLCC_CMPTDMA, 15, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_VIDFW, 17, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMPNG, 21, 1024, 0, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_WLHW, 24, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 512, 1, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
- { LLCC_WRCACHE, 31, 128, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDSC1,
+ .slice_id = 3,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3ff,
+ .res_ways = 0xc00,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 5120,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 20,
+ .max_cap = 1024,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xc,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xc,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ },
};

static const struct llcc_slice_config sc8280xp_data[] = {
- { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
- { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_GPU, 12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDHW, 22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_DRE, 26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
- { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
-};
-
-static const struct llcc_slice_config sdm845_data[] = {
- { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
- { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
- { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 },
- { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 },
- { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 },
- { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 },
- { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 },
- { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
+};
+
+static const struct llcc_slice_config sdm845_data[] = {{
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDSC1,
+ .slice_id = 3,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ROTATOR,
+ .slice_id = 4,
+ .max_cap = 563,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xe,
+ .cache_mode = 2,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VOICE,
+ .slice_id = 5,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 2,
+ .bonus_ways = 0xfc,
+ .res_ways = 0xf00,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xc,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 2304,
+ .priority = 1,
+ .bonus_ways = 0xff0,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 256,
+ .priority = 2,
+ .res_ways = 0x1,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 20,
+ .max_cap = 1024,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xf00,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x1e,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ },
};
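/*
 * Illustrative aside, not part of the patch: the conversion in this diff is
 * purely mechanical. Taking the first deleted sdm845 row as an example,
 *
 *	{ LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
 *
 * each positional value becomes a named field, and slots that held 0 can
 * simply be omitted, since members without a designator are zero-initialized.
 * The variable name below is made up for illustration only.
 */
static const struct llcc_slice_config sdm845_cpuss_example = {
	.usecase_id = LLCC_CPUSS,	/* 1st positional value */
	.slice_id = 1,			/* 2nd */
	.max_cap = 2816,		/* 3rd */
	.priority = 1,			/* 4th */
	/* 5th value was 0, so .fixed_size is left out (rows with 1 gain .fixed_size = true) */
	.bonus_ways = 0xffc,		/* 6th */
	.res_ways = 0x2,		/* 7th */
	.cache_mode = 0,		/* 8th, kept explicit as in the patch */
	/* the always-zero 9th slot is likewise dropped */
	.dis_cap_alloc = true,		/* 10th */
	.retain_on_pc = true,		/* 11th */
	.activate_on_init = true,	/* 12th */
};
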
static const struct llcc_slice_config sm6350_data[] = {
- { LLCC_CPUSS, 1, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 },
- { LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MDMPNG, 21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 512,
+ .priority = 2,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 256,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 768,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};

static const struct llcc_slice_config sm7150_data[] = {
- { LLCC_CPUSS, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_MDM, 8, 128, 2, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 128,
+ .priority = 2,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
};

static const struct llcc_slice_config sm8150_data[] = {
- { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 },
- { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW , 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 },
- { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
- { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDSC1,
+ .slice_id = 3,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 3072,
+ .priority = 1,
+ .bonus_ways = 0xff,
+ .res_ways = 0xf00,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 2560,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 20,
+ .max_cap = 1024,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ },
};

static const struct llcc_slice_config sm8250_data[] = {
- { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
- { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_CMPT, 10, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CMPTDMA, 15, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_DISP, 16, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_VIDFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_NPU, 23, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_WLHW, 24, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CVP, 28, 256, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_APTCM, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0, 0 },
- { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 3,
+ .res_ways = 0x3,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};

static const struct llcc_slice_config sm8350_data[] = {
- { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
- { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 },
- { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
- { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 3,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 3072,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ },
};

static const struct llcc_slice_config sm8450_data[] = {
- {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
- {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
- {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 },
- {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
- {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 },
- {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
- {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 3,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 4096,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf000,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf000,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUMTE,
+ .slice_id = 23,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 27,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 8,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ },
};

static const struct llcc_slice_config sm8550_data[] = {
- {LLCC_CPUSS, 1, 5120, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_VIDSC0, 2, 512, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MDMHPGRW, 25, 1024, 4, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MODHW, 26, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_GPU, 9, 3096, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MMUHWT, 18, 768, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_DISP, 16, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MDMPNG, 27, 1024, 0, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CVP, 8, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MODPE, 29, 64, 1, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
- {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP0, 4, 256, 4, 1, 0xF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP1, 7, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CMPTHCP, 17, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_LCPDARE, 30, 128, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
- {LLCC_AENPU, 3, 3072, 1, 1, 0xFE01FF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_ISLAND1, 12, 1792, 7, 1, 0xFE00, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_ISLAND4, 15, 256, 7, 1, 0x10000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP2, 19, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP3, 20, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP4, 21, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_DISP_WB, 23, 1024, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_DISP_1, 24, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_VIDVSP, 28, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 4,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 3096,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xf00000,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf00000,
+ .cache_mode = 0,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 7,
+ .max_cap = 3200,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 17,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfe01ff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 1792,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0xfe00,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_ISLAND4,
+ .slice_id = 15,
+ .max_cap = 256,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0x10000,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP2,
+ .slice_id = 19,
+ .max_cap = 3200,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP3,
+ .slice_id = 20,
+ .max_cap = 3200,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP4,
+ .slice_id = 21,
+ .max_cap = 3200,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 1024,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_DISP_1,
+ .slice_id = 24,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ },
+};
+
+static const struct llcc_slice_config sm8650_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .stale_en = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 3,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 3096,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf00000,
+ .cache_mode = 0,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 7,
+ .max_cap = 3200,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 17,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 5888,
+ .priority = 7,
+ .fixed_size = true,
+ .res_ways = 0x7fffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ },
+};
+
+static const struct llcc_slice_config sm8750_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 35,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 5,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 34,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 5632,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 7168,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .stale_en = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CAMFW,
+ .slice_id = 20,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 256,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 800,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 19,
+ .max_cap = 64,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 15,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 7936,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0x7fffffff,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDDEC,
+ .slice_id = 5,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMOFE,
+ .slice_id = 33,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTIP,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTIP,
+ .slice_id = 14,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTRF,
+ .slice_id = 7,
+ .max_cap = 3584,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTRF,
+ .slice_id = 21,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CPUSSMPAM,
+ .slice_id = 6,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ },
+};
+
+static const struct llcc_slice_config qcs615_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 256,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
+};
+
+static const struct llcc_slice_config qcs8300_data[] = {
+ {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
+};
+
+static const struct llcc_slice_config qdu1000_data_2ch[] = {
+ {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 256,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xc,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
+};
+
+static const struct llcc_slice_config qdu1000_data_4ch[] = {
+ {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 512,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xc,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
+};
+
+static const struct llcc_slice_config qdu1000_data_8ch[] = {
+ {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 2048,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xc,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
+};
+
+static const struct llcc_slice_config x1e80100_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 4608,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 7,
+ .max_cap = 3072,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 2048,
+ .priority = 7,
+ .fixed_size = true,
+ .res_ways = 0xf,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP2,
+ .slice_id = 19,
+ .max_cap = 3072,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP3,
+ .slice_id = 20,
+ .max_cap = 3072,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP4,
+ .slice_id = 21,
+ .max_cap = 3072,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ },
};

static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
@@ -410,6 +3803,33 @@ static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = {
.drp_ecc_db_err_syn0 = 0x52120,
};
+static const struct llcc_edac_reg_offset llcc_v6_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x47448,
+ .trp_ecc_error_status1 = 0x47450,
+ .trp_ecc_sb_err_syn0 = 0x47490,
+ .trp_ecc_db_err_syn0 = 0x474d0,
+ .trp_ecc_error_cntr_clear = 0x47444,
+ .trp_interrupt_0_status = 0x47600,
+ .trp_interrupt_0_clear = 0x47604,
+ .trp_interrupt_0_enable = 0x47608,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x6400c,
+ .cmn_interrupt_0_enable = 0x6401c,
+ .cmn_interrupt_2_enable = 0x6403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x80000,
+ .drp_ecc_error_cntr_clear = 0x80004,
+ .drp_interrupt_status = 0x80020,
+ .drp_interrupt_clear = 0x80028,
+ .drp_interrupt_enable = 0x8002c,
+ .drp_ecc_error_status0 = 0x820f4,
+ .drp_ecc_error_status1 = 0x820f8,
+ .drp_ecc_sb_err_syn0 = 0x820fc,
+ .drp_ecc_db_err_syn0 = 0x82120,
+};
+
/* LLCC register offset starting from v1.0.0 */
static const u32 llcc_v1_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00030000,
@@ -422,101 +3842,375 @@ static const u32 llcc_v2_1_reg_offset[] = {
[LLCC_COMMON_STATUS0] = 0x0003400c,
};
-static const struct qcom_llcc_config sc7180_cfg = {
- .sct_data = sc7180_data,
- .size = ARRAY_SIZE(sc7180_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+/* LLCC register offset starting from v6.0.0 */
+static const u32 llcc_v6_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00064000,
+ [LLCC_COMMON_STATUS0] = 0x0006400c,
+ [LLCC_TRP_ATTR0_CFG] = 0x00041000,
+ [LLCC_TRP_ATTR1_CFG] = 0x00041008,
+ [LLCC_TRP_ATTR2_CFG] = 0x00041010,
+ [LLCC_TRP_ATTR3_CFG] = 0x00041014,
+ [LLCC_TRP_SID_DIS_CAP_ALLOC] = 0x00042000,
+ [LLCC_TRP_ALGO_STALE_EN] = 0x00042008,
+ [LLCC_TRP_ALGO_STALE_CAP_EN] = 0x00042010,
+ [LLCC_TRP_ALGO_MRU0] = 0x00042018,
+ [LLCC_TRP_ALGO_MRU1] = 0x00042020,
+ [LLCC_TRP_ALGO_ALLOC0] = 0x00042028,
+ [LLCC_TRP_ALGO_ALLOC1] = 0x00042030,
+ [LLCC_TRP_ALGO_ALLOC2] = 0x00042038,
+ [LLCC_TRP_ALGO_ALLOC3] = 0x00042040,
+ [LLCC_TRP_WRS_EN] = 0x00042080,
+ [LLCC_TRP_WRS_CACHEABLE_EN] = 0x00042088,
+};
+
+static const struct qcom_llcc_config kaanapali_cfg[] = {
+ {
+ .sct_data = kaanapali_data,
+ .size = ARRAY_SIZE(kaanapali_data),
+ .reg_offset = llcc_v6_reg_offset,
+ .edac_reg_offset = &llcc_v6_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config qcs615_cfg[] = {
+ {
+ .sct_data = qcs615_data,
+ .size = ARRAY_SIZE(qcs615_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config qcs8300_cfg[] = {
+ {
+ .sct_data = qcs8300_data,
+ .size = ARRAY_SIZE(qcs8300_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .num_banks = 4,
+ },
+};
+
+static const struct qcom_llcc_config qdu1000_cfg[] = {
+ {
+ .sct_data = qdu1000_data_8ch,
+ .size = ARRAY_SIZE(qdu1000_data_8ch),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+ {
+ .sct_data = qdu1000_data_4ch,
+ .size = ARRAY_SIZE(qdu1000_data_4ch),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+ {
+ .sct_data = qdu1000_data_4ch,
+ .size = ARRAY_SIZE(qdu1000_data_4ch),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+ {
+ .sct_data = qdu1000_data_2ch,
+ .size = ARRAY_SIZE(qdu1000_data_2ch),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config ipq5424_cfg[] = {
+ {
+ .sct_data = ipq5424_data,
+ .size = ARRAY_SIZE(ipq5424_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .no_broadcast_register = true,
+ },
+};
+
+static const struct qcom_llcc_config sa8775p_cfg[] = {
+ {
+ .sct_data = sa8775p_data,
+ .size = ARRAY_SIZE(sa8775p_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sar1130p_cfg[] = {
+ {
+ .sct_data = sar1130p_data,
+ .size = ARRAY_SIZE(sar1130p_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .max_cap_shift = 14,
+ .num_banks = 2,
+ },
+};
+
+static const struct qcom_llcc_config sar2130p_cfg[] = {
+ {
+ .sct_data = sar2130p_data,
+ .size = ARRAY_SIZE(sar2130p_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .max_cap_shift = 14,
+ .num_banks = 2,
+ },
+};
+
+static const struct qcom_llcc_config sc7180_cfg[] = {
+ {
+ .sct_data = sc7180_data,
+ .size = ARRAY_SIZE(sc7180_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sc7280_cfg[] = {
+ {
+ .sct_data = sc7280_data,
+ .size = ARRAY_SIZE(sc7280_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sc8180x_cfg[] = {
+ {
+ .sct_data = sc8180x_data,
+ .size = ARRAY_SIZE(sc8180x_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sc8280xp_cfg[] = {
+ {
+ .sct_data = sc8280xp_data,
+ .size = ARRAY_SIZE(sc8280xp_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sdm845_cfg[] = {
+ {
+ .sct_data = sdm845_data,
+ .size = ARRAY_SIZE(sdm845_data),
+ .skip_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ .no_edac = true,
+ },
+};
+
+static const struct qcom_llcc_config sm6350_cfg[] = {
+ {
+ .sct_data = sm6350_data,
+ .size = ARRAY_SIZE(sm6350_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm7150_cfg[] = {
+ {
+ .sct_data = sm7150_data,
+ .size = ARRAY_SIZE(sm7150_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm8150_cfg[] = {
+ {
+ .sct_data = sm8150_data,
+ .size = ARRAY_SIZE(sm8150_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm8250_cfg[] = {
+ {
+ .sct_data = sm8250_data,
+ .size = ARRAY_SIZE(sm8250_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm8350_cfg[] = {
+ {
+ .sct_data = sm8350_data,
+ .size = ARRAY_SIZE(sm8350_data),
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm8450_cfg[] = {
+ {
+ .sct_data = sm8450_data,
+ .size = ARRAY_SIZE(sm8450_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm8550_cfg[] = {
+ {
+ .sct_data = sm8550_data,
+ .size = ARRAY_SIZE(sm8550_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm8650_cfg[] = {
+ {
+ .sct_data = sm8650_data,
+ .size = ARRAY_SIZE(sm8650_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config sm8750_cfg[] = {
+ {
+ .sct_data = sm8750_data,
+ .size = ARRAY_SIZE(sm8750_data),
+ .skip_llcc_cfg = false,
+ .reg_offset = llcc_v6_reg_offset,
+ .edac_reg_offset = &llcc_v6_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config x1e80100_cfg[] = {
+ {
+ .sct_data = x1e80100_data,
+ .size = ARRAY_SIZE(x1e80100_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .irq_configured = true,
+ },
+};
+
+static const struct qcom_sct_config kaanapali_cfgs = {
+ .llcc_config = kaanapali_cfg,
+ .num_config = ARRAY_SIZE(kaanapali_cfg),
+};
+
+static const struct qcom_sct_config qcs615_cfgs = {
+ .llcc_config = qcs615_cfg,
+ .num_config = ARRAY_SIZE(qcs615_cfg),
+};
+
+static const struct qcom_sct_config qcs8300_cfgs = {
+ .llcc_config = qcs8300_cfg,
+ .num_config = ARRAY_SIZE(qcs8300_cfg),
+};
+
+static const struct qcom_sct_config qdu1000_cfgs = {
+ .llcc_config = qdu1000_cfg,
+ .num_config = ARRAY_SIZE(qdu1000_cfg),
+};
+
+static const struct qcom_sct_config ipq5424_cfgs = {
+ .llcc_config = ipq5424_cfg,
+ .num_config = ARRAY_SIZE(ipq5424_cfg),
+};
+
+static const struct qcom_sct_config sa8775p_cfgs = {
+ .llcc_config = sa8775p_cfg,
+ .num_config = ARRAY_SIZE(sa8775p_cfg),
};
-static const struct qcom_llcc_config sc7280_cfg = {
- .sct_data = sc7280_data,
- .size = ARRAY_SIZE(sc7280_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sar1130p_cfgs = {
+ .llcc_config = sar1130p_cfg,
+ .num_config = ARRAY_SIZE(sar1130p_cfg),
};
-static const struct qcom_llcc_config sc8180x_cfg = {
- .sct_data = sc8180x_data,
- .size = ARRAY_SIZE(sc8180x_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sar2130p_cfgs = {
+ .llcc_config = sar2130p_cfg,
+ .num_config = ARRAY_SIZE(sar2130p_cfg),
};
-static const struct qcom_llcc_config sc8280xp_cfg = {
- .sct_data = sc8280xp_data,
- .size = ARRAY_SIZE(sc8280xp_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sc7180_cfgs = {
+ .llcc_config = sc7180_cfg,
+ .num_config = ARRAY_SIZE(sc7180_cfg),
};
-static const struct qcom_llcc_config sdm845_cfg = {
- .sct_data = sdm845_data,
- .size = ARRAY_SIZE(sdm845_data),
- .need_llcc_cfg = false,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
- .no_edac = true,
+static const struct qcom_sct_config sc7280_cfgs = {
+ .llcc_config = sc7280_cfg,
+ .num_config = ARRAY_SIZE(sc7280_cfg),
};
-static const struct qcom_llcc_config sm6350_cfg = {
- .sct_data = sm6350_data,
- .size = ARRAY_SIZE(sm6350_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sc8180x_cfgs = {
+ .llcc_config = sc8180x_cfg,
+ .num_config = ARRAY_SIZE(sc8180x_cfg),
};
-static const struct qcom_llcc_config sm7150_cfg = {
- .sct_data = sm7150_data,
- .size = ARRAY_SIZE(sm7150_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sc8280xp_cfgs = {
+ .llcc_config = sc8280xp_cfg,
+ .num_config = ARRAY_SIZE(sc8280xp_cfg),
};
-static const struct qcom_llcc_config sm8150_cfg = {
- .sct_data = sm8150_data,
- .size = ARRAY_SIZE(sm8150_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sdm845_cfgs = {
+ .llcc_config = sdm845_cfg,
+ .num_config = ARRAY_SIZE(sdm845_cfg),
};
-static const struct qcom_llcc_config sm8250_cfg = {
- .sct_data = sm8250_data,
- .size = ARRAY_SIZE(sm8250_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sm6350_cfgs = {
+ .llcc_config = sm6350_cfg,
+ .num_config = ARRAY_SIZE(sm6350_cfg),
};
-static const struct qcom_llcc_config sm8350_cfg = {
- .sct_data = sm8350_data,
- .size = ARRAY_SIZE(sm8350_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v1_reg_offset,
- .edac_reg_offset = &llcc_v1_edac_reg_offset,
+static const struct qcom_sct_config sm7150_cfgs = {
+ .llcc_config = sm7150_cfg,
+ .num_config = ARRAY_SIZE(sm7150_cfg),
};
-static const struct qcom_llcc_config sm8450_cfg = {
- .sct_data = sm8450_data,
- .size = ARRAY_SIZE(sm8450_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v2_1_reg_offset,
- .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+static const struct qcom_sct_config sm8150_cfgs = {
+ .llcc_config = sm8150_cfg,
+ .num_config = ARRAY_SIZE(sm8150_cfg),
};
-static const struct qcom_llcc_config sm8550_cfg = {
- .sct_data = sm8550_data,
- .size = ARRAY_SIZE(sm8550_data),
- .need_llcc_cfg = true,
- .reg_offset = llcc_v2_1_reg_offset,
- .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+static const struct qcom_sct_config sm8250_cfgs = {
+ .llcc_config = sm8250_cfg,
+ .num_config = ARRAY_SIZE(sm8250_cfg),
+};
+
+static const struct qcom_sct_config sm8350_cfgs = {
+ .llcc_config = sm8350_cfg,
+ .num_config = ARRAY_SIZE(sm8350_cfg),
+};
+
+static const struct qcom_sct_config sm8450_cfgs = {
+ .llcc_config = sm8450_cfg,
+ .num_config = ARRAY_SIZE(sm8450_cfg),
+};
+
+static const struct qcom_sct_config sm8550_cfgs = {
+ .llcc_config = sm8550_cfg,
+ .num_config = ARRAY_SIZE(sm8550_cfg),
+};
+
+static const struct qcom_sct_config sm8650_cfgs = {
+ .llcc_config = sm8650_cfg,
+ .num_config = ARRAY_SIZE(sm8650_cfg),
+};
+
+static const struct qcom_sct_config sm8750_cfgs = {
+ .llcc_config = sm8750_cfg,
+ .num_config = ARRAY_SIZE(sm8750_cfg),
+};
+
+static const struct qcom_sct_config x1e80100_cfgs = {
+ .llcc_config = x1e80100_cfg,
+ .num_config = ARRAY_SIZE(x1e80100_cfg),
};
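
With this refactor every SoC wraps its per-channel qcom_llcc_config array in a qcom_sct_config, and the of_match table points at the wrapper. As a rough sketch of the resulting pattern (the "examplesoc" names below are made up for illustration, not part of the patch):

	static const struct qcom_llcc_config examplesoc_cfg[] = {
		{
			.sct_data        = examplesoc_data,	/* hypothetical SCT table */
			.size            = ARRAY_SIZE(examplesoc_data),
			.reg_offset      = llcc_v2_1_reg_offset,
			.edac_reg_offset = &llcc_v2_1_edac_reg_offset,
		},
	};

	static const struct qcom_sct_config examplesoc_cfgs = {
		.llcc_config = examplesoc_cfg,
		.num_config  = ARRAY_SIZE(examplesoc_cfg),
	};

	/* plus a matching entry in qcom_llcc_of_match: */
	/* { .compatible = "qcom,examplesoc-llcc", .data = &examplesoc_cfgs }, */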
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -559,7 +4253,7 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid)
EXPORT_SYMBOL_GPL(llcc_slice_getd);
/**
- * llcc_slice_putd - llcc slice descritpor
+ * llcc_slice_putd - llcc slice descriptor
* @desc: Pointer to llcc slice descriptor
*/
void llcc_slice_putd(struct llcc_slice_desc *desc)
@@ -572,6 +4266,7 @@ EXPORT_SYMBOL_GPL(llcc_slice_putd);
static int llcc_update_act_ctrl(u32 sid,
u32 act_ctrl_reg_val, u32 status)
{
+ struct regmap *regmap;
u32 act_ctrl_reg;
u32 act_clear_reg;
u32 status_reg;
@@ -600,7 +4295,8 @@ static int llcc_update_act_ctrl(u32 sid,
return ret;
if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
- ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
+ regmap = drv_data->bcast_and_regmap ?: drv_data->bcast_regmap;
+ ret = regmap_read_poll_timeout(regmap, status_reg,
slice_status, (slice_status & ACT_COMPLETE),
0, LLCC_STATUS_READ_DELAY);
if (ret)
@@ -610,6 +4306,8 @@ static int llcc_update_act_ctrl(u32 sid,
ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
slice_status, !(slice_status & status),
0, LLCC_STATUS_READ_DELAY);
+ if (ret)
+ return ret;
if (drv_data->version >= LLCC_VERSION_4_1_0_0)
ret = regmap_write(drv_data->bcast_regmap, act_clear_reg,
@@ -752,7 +4450,10 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
*/
max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
- attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+ if (cfg->max_cap_shift)
+ attr1_val |= max_cap_cacheline << cfg->max_cap_shift;
+ else
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
attr1_cfg = LLCC_TRP_ATTR1_CFGn(config->slice_id);
@@ -781,19 +4482,20 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
return ret;
}
- if (cfg->need_llcc_cfg) {
+ /* At least SDM845 disallows non-secure writes to these registers */
+ if (!cfg->skip_llcc_cfg) {
u32 disable_cap_alloc, retain_pc;
disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
- ret = regmap_write(drv_data->bcast_regmap,
- LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC,
+ BIT(config->slice_id), disable_cap_alloc);
if (ret)
return ret;
if (drv_data->version < LLCC_VERSION_4_1_0_0) {
retain_pc = config->retain_on_pc << config->slice_id;
- ret = regmap_write(drv_data->bcast_regmap,
- LLCC_TRP_PCB_ACT, retain_pc);
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
+ BIT(config->slice_id), retain_pc);
if (ret)
return ret;
}
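
The regmap_write() to regmap_update_bits() conversion above matters because LLCC_TRP_SCID_DIS_CAP_ALLOC and LLCC_TRP_PCB_ACT hold one bit per slice while this function runs once per slice: a full-register write would wipe the bits already programmed for earlier slices. Reduced to a sketch:

	/* before: whole-register write, clobbers the bits of previously programmed slices */
	regmap_write(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
		     config->retain_on_pc << config->slice_id);

	/* after: read-modify-write of only this slice's bit */
	regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
			   BIT(config->slice_id),
			   config->retain_on_pc << config->slice_id);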
@@ -886,6 +4588,139 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
return ret;
}
+static int _qcom_llcc_cfg_program_v6(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
+{
+ u32 stale_en, stale_cap_en, mru_uncap_en, mru_rollover;
+ u32 alloc_oneway_en, ovcap_en, ovcap_prio, vict_prio;
+ u32 attr0_cfg, attr1_cfg, attr2_cfg, attr3_cfg;
+ u32 attr0_val, attr1_val, attr2_val, attr3_val;
+ u32 slice_offset, reg_offset;
+ struct llcc_slice_desc *desc;
+ u32 wren, wr_cache_en;
+ int ret;
+
+ attr0_cfg = LLCC_V6_TRP_ATTR0_CFGn(config->slice_id);
+ attr1_cfg = LLCC_V6_TRP_ATTR1_CFGn(config->slice_id);
+ attr2_cfg = LLCC_V6_TRP_ATTR2_CFGn(config->slice_id);
+ attr3_cfg = LLCC_V6_TRP_ATTR3_CFGn(config->slice_id);
+
+ attr0_val = config->res_ways;
+ attr1_val = config->bonus_ways;
+ attr2_val = config->cache_mode;
+ attr2_val |= FIELD_PREP(ATTR2_PROBE_TARGET_WAYS_MASK, config->probe_target_ways);
+ attr2_val |= FIELD_PREP(ATTR2_FIXED_SIZE_MASK, config->fixed_size);
+ attr2_val |= FIELD_PREP(ATTR2_PRIORITY_MASK, config->priority);
+
+ if (config->parent_slice_id && config->fixed_size) {
+ attr2_val |= FIELD_PREP(ATTR2_PARENT_SCID_MASK, config->parent_slice_id);
+ attr2_val |= ATTR2_IN_A_GROUP_MASK;
+ }
+
+ attr3_val = MAX_CAP_TO_BYTES(config->max_cap);
+ attr3_val /= drv_data->num_banks;
+ attr3_val >>= CACHE_LINE_SIZE_SHIFT;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr2_cfg, attr2_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr3_cfg, attr3_val);
+ if (ret)
+ return ret;
+
+ slice_offset = config->slice_id % 32;
+ reg_offset = (config->slice_id / 32) * 4;
+
+ wren = config->write_scid_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_WRS_EN] + reg_offset,
+ BIT(slice_offset), wren);
+ if (ret)
+ return ret;
+
+ wr_cache_en = config->write_scid_cacheable_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_WRS_CACHEABLE_EN] + reg_offset,
+ BIT(slice_offset), wr_cache_en);
+ if (ret)
+ return ret;
+
+ stale_en = config->stale_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_STALE_EN] + reg_offset,
+ BIT(slice_offset), stale_en);
+ if (ret)
+ return ret;
+
+ stale_cap_en = config->stale_cap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_STALE_CAP_EN] + reg_offset,
+ BIT(slice_offset), stale_cap_en);
+ if (ret)
+ return ret;
+
+ mru_uncap_en = config->mru_uncap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_MRU0] + reg_offset,
+ BIT(slice_offset), mru_uncap_en);
+ if (ret)
+ return ret;
+
+ mru_rollover = config->mru_rollover << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_MRU1] + reg_offset,
+ BIT(slice_offset), mru_rollover);
+ if (ret)
+ return ret;
+
+ alloc_oneway_en = config->alloc_oneway_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC0] + reg_offset,
+ BIT(slice_offset), alloc_oneway_en);
+ if (ret)
+ return ret;
+
+ ovcap_en = config->ovcap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC1] + reg_offset,
+ BIT(slice_offset), ovcap_en);
+ if (ret)
+ return ret;
+
+ ovcap_prio = config->ovcap_prio << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC2] + reg_offset,
+ BIT(slice_offset), ovcap_prio);
+ if (ret)
+ return ret;
+
+ vict_prio = config->vict_prio << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC3] + reg_offset,
+ BIT(slice_offset), vict_prio);
+ if (ret)
+ return ret;
+
+ if (config->activate_on_init) {
+ desc = llcc_slice_getd(config->usecase_id);
+ if (PTR_ERR_OR_ZERO(desc))
+ return -EINVAL;
+
+ ret = llcc_slice_activate(desc);
+ }
+
+ return ret;
+}
+
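
In _qcom_llcc_cfg_program_v6() the per-slice enable bits (write-SCID, stale, MRU and allocation controls) are packed 32 slices to a 32-bit register, which is what the slice_offset/reg_offset arithmetic encodes. A quick worked example, using the stale-enable register as in the code above:

	/* slice_id 9  -> reg_offset = (9 / 32) * 4  = 0, bit 9 of the first word  */
	/* slice_id 35 -> reg_offset = (35 / 32) * 4 = 4, bit 3 of the second word */
	u32 slice_offset = config->slice_id % 32;
	u32 reg_offset = (config->slice_id / 32) * 4;

	regmap_update_bits(drv_data->bcast_regmap,
			   cfg->reg_offset[LLCC_TRP_ALGO_STALE_EN] + reg_offset,
			   BIT(slice_offset), config->stale_en << slice_offset);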
static int qcom_llcc_cfg_program(struct platform_device *pdev,
const struct qcom_llcc_config *cfg)
{
@@ -897,20 +4732,45 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev,
sz = drv_data->cfg_size;
llcc_table = drv_data->cfg;
- for (i = 0; i < sz; i++) {
- ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
- if (ret)
- return ret;
+ if (drv_data->version >= LLCC_VERSION_6_0_0_0) {
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program_v6(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
+ } else {
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_get_cfg_index(struct platform_device *pdev, u8 *cfg_index, int num_config)
+{
+ int ret;
+
+ ret = nvmem_cell_read_u8(&pdev->dev, "multi-chan-ddr", cfg_index);
+ if (ret == -ENOENT || ret == -EOPNOTSUPP) {
+ if (num_config > 1)
+ return -EINVAL;
+ *cfg_index = 0;
+ return 0;
}
+ if (!ret && *cfg_index >= num_config)
+ ret = -EINVAL;
+
return ret;
}
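
The nvmem lookup above is deliberately optional. A short summary of the fallback rules, plus how probe consumes the result (both taken from the code in this patch):

	/* cell read ok                        -> cfg_index must be < num_config, else -EINVAL   */
	/* cell missing (-ENOENT/-EOPNOTSUPP)  -> allowed only with a single config, index 0     */
	/* any other error                     -> propagated unchanged                           */
	u8 cfg_index;

	ret = qcom_llcc_get_cfg_index(pdev, &cfg_index, cfgs->num_config);
	if (ret)
		goto err;
	cfg = &cfgs->llcc_config[cfg_index];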
-static int qcom_llcc_remove(struct platform_device *pdev)
+static void qcom_llcc_remove(struct platform_device *pdev)
{
/* Set the global pointer to an error code to avoid referencing it */
drv_data = ERR_PTR(-ENODEV);
- return 0;
}
static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index,
@@ -921,7 +4781,6 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
base = devm_platform_ioremap_resource(pdev, index);
@@ -938,12 +4797,17 @@ static int qcom_llcc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret, i;
struct platform_device *llcc_edac;
+ const struct qcom_sct_config *cfgs;
const struct qcom_llcc_config *cfg;
const struct llcc_slice_config *llcc_cfg;
u32 sz;
+ u8 cfg_index;
u32 version;
struct regmap *regmap;
+ if (!IS_ERR(drv_data))
+ return -EBUSY;
+
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
ret = -ENOMEM;
@@ -957,14 +4821,27 @@ static int qcom_llcc_probe(struct platform_device *pdev)
goto err;
}
- cfg = of_device_get_match_data(&pdev->dev);
-
- ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
+ cfgs = of_device_get_match_data(&pdev->dev);
+ if (!cfgs) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = qcom_llcc_get_cfg_index(pdev, &cfg_index, cfgs->num_config);
if (ret)
goto err;
+ cfg = &cfgs->llcc_config[cfg_index];
+
+ if (cfg->num_banks) {
+ num_banks = cfg->num_banks;
+ } else {
+ ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
+ if (ret)
+ goto err;
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
+ }
- num_banks &= LLCC_LB_CNT_MASK;
- num_banks >>= LLCC_LB_CNT_SHIFT;
drv_data->num_banks = num_banks;
drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL);
@@ -977,22 +4854,23 @@ static int qcom_llcc_probe(struct platform_device *pdev)
/* Initialize rest of LLCC bank regmaps */
for (i = 1; i < num_banks; i++) {
- char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);
+ char *base __free(kfree) = kasprintf(GFP_KERNEL, "llcc%d_base", i);
drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
if (IS_ERR(drv_data->regmaps[i])) {
ret = PTR_ERR(drv_data->regmaps[i]);
- kfree(base);
goto err;
}
-
- kfree(base);
}
drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
if (IS_ERR(drv_data->bcast_regmap)) {
- ret = PTR_ERR(drv_data->bcast_regmap);
- goto err;
+ if (cfg->no_broadcast_register) {
+ drv_data->bcast_regmap = regmap;
+ } else {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
}
/* Extract version of the IP */
@@ -1003,6 +4881,18 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->version = version;
+ /* Applicable only when drv_data->version >= 4.1 */
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
+ drv_data->bcast_and_regmap = qcom_llcc_init_mmio(pdev, i + 1, "llcc_broadcast_and_base");
+ if (IS_ERR(drv_data->bcast_and_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_and_regmap);
+ if (ret == -EINVAL)
+ drv_data->bcast_and_regmap = NULL;
+ else
+ goto err;
+ }
+ }
+
llcc_cfg = cfg->sct_data;
sz = cfg->size;
@@ -1020,6 +4910,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
drv_data->edac_reg_offset = cfg->edac_reg_offset;
+ drv_data->ecc_irq_configured = cfg->irq_configured;
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
@@ -1050,18 +4941,29 @@ err:
}
static const struct of_device_id qcom_llcc_of_match[] = {
- { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
- { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
- { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfg },
- { .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfg },
- { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
- { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
- { .compatible = "qcom,sm7150-llcc", .data = &sm7150_cfg },
- { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
- { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
- { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg },
- { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg },
- { .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfg },
+ { .compatible = "qcom,ipq5424-llcc", .data = &ipq5424_cfgs},
+ { .compatible = "qcom,kaanapali-llcc", .data = &kaanapali_cfgs},
+ { .compatible = "qcom,qcs615-llcc", .data = &qcs615_cfgs},
+ { .compatible = "qcom,qcs8300-llcc", .data = &qcs8300_cfgs},
+ { .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
+ { .compatible = "qcom,sa8775p-llcc", .data = &sa8775p_cfgs },
+ { .compatible = "qcom,sar1130p-llcc", .data = &sar1130p_cfgs },
+ { .compatible = "qcom,sar2130p-llcc", .data = &sar2130p_cfgs },
+ { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfgs },
+ { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfgs },
+ { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfgs },
+ { .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfgs },
+ { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfgs },
+ { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfgs },
+ { .compatible = "qcom,sm7150-llcc", .data = &sm7150_cfgs },
+ { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfgs },
+ { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfgs },
+ { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfgs },
+ { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfgs },
+ { .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfgs },
+ { .compatible = "qcom,sm8650-llcc", .data = &sm8650_cfgs },
+ { .compatible = "qcom,sm8750-llcc", .data = &sm8750_cfgs },
+ { .compatible = "qcom,x1e80100-llcc", .data = &x1e80100_cfgs },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 6f177e46fa0f..c239107cb930 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -7,6 +7,7 @@
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/elf.h>
#include <linux/firmware.h>
@@ -17,7 +18,40 @@
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
-static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+static bool mdt_header_valid(const struct firmware *fw)
+{
+ const struct elf32_hdr *ehdr;
+ size_t phend;
+ size_t shend;
+
+ if (fw->size < sizeof(*ehdr))
+ return false;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG))
+ return false;
+
+ if (ehdr->e_phentsize != sizeof(struct elf32_phdr))
+ return false;
+
+ phend = size_add(size_mul(sizeof(struct elf32_phdr), ehdr->e_phnum), ehdr->e_phoff);
+ if (phend > fw->size)
+ return false;
+
+ if (ehdr->e_shentsize || ehdr->e_shnum) {
+ if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
+ return false;
+
+ shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
+ if (shend > fw->size)
+ return false;
+ }
+
+ return true;
+}
+
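
Besides adding mdt_header_valid(), the loader stops assuming that the program-header table sits immediately behind the ELF header and instead honours e_phoff, which the validator has just bounds-checked against fw->size. The change, reduced to its essence:

	/* before: only correct when e_phoff == sizeof(struct elf32_hdr) */
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* after: use the offset the ELF header actually advertises */
	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);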
+static bool mdt_phdr_loadable(const struct elf32_phdr *phdr)
{
if (phdr->p_type != PT_LOAD)
return false;
@@ -37,13 +71,12 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
{
const struct elf32_phdr *phdr = &phdrs[segment];
const struct firmware *seg_fw;
- char *seg_name;
ssize_t ret;
if (strlen(fw_name) < 4)
return -EINVAL;
- seg_name = kstrdup(fw_name, GFP_KERNEL);
+ char *seg_name __free(kfree) = kstrdup(fw_name, GFP_KERNEL);
if (!seg_name)
return -ENOMEM;
@@ -52,7 +85,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
ptr, phdr->p_filesz);
if (ret) {
dev_err(dev, "error %zd loading %s\n", ret, seg_name);
- kfree(seg_name);
return ret;
}
@@ -64,7 +96,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
}
release_firmware(seg_fw);
- kfree(seg_name);
return ret;
}
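
The kstrdup()/kasprintf() conversions in this patch set lean on the scope-based cleanup helpers from <linux/cleanup.h> (note the new include above): the buffer is freed automatically when the variable leaves scope, so the error paths drop their explicit kfree() calls. A minimal sketch of the pattern (the helper name is illustrative, not from the patch):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int mdt_example(const char *fw_name)
	{
		char *seg_name __free(kfree) = kstrdup(fw_name, GFP_KERNEL);

		if (!seg_name)
			return -ENOMEM;

		/* ... use seg_name; every return path below kfree()s it implicitly ... */
		return 0;
	}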
@@ -84,13 +115,16 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw)
phys_addr_t max_addr = 0;
int i;
+ if (!mdt_header_valid(fw))
+ return -EINVAL;
+
ehdr = (struct elf32_hdr *)fw->data;
- phdrs = (struct elf32_phdr *)(ehdr + 1);
+ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
- if (!mdt_phdr_valid(phdr))
+ if (!mdt_phdr_loadable(phdr))
continue;
if (phdr->p_paddr < min_addr)
@@ -136,8 +170,11 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
ssize_t ret;
void *data;
+ if (!mdt_header_valid(fw))
+ return ERR_PTR(-EINVAL);
+
ehdr = (struct elf32_hdr *)fw->data;
- phdrs = (struct elf32_phdr *)(ehdr + 1);
+ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
if (ehdr->e_phnum < 2)
return ERR_PTR(-EINVAL);
@@ -216,13 +253,16 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
int ret;
int i;
+ if (!mdt_header_valid(fw))
+ return -EINVAL;
+
ehdr = (struct elf32_hdr *)fw->data;
- phdrs = (struct elf32_phdr *)(ehdr + 1);
+ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
- if (!mdt_phdr_valid(phdr))
+ if (!mdt_phdr_loadable(phdr))
continue;
if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
@@ -264,7 +304,7 @@ out:
}
EXPORT_SYMBOL_GPL(qcom_mdt_pas_init);
-static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_name)
+static bool qcom_mdt_bins_are_split(const struct firmware *fw)
{
const struct elf32_phdr *phdrs;
const struct elf32_hdr *ehdr;
@@ -272,7 +312,7 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na
int i;
ehdr = (struct elf32_hdr *)fw->data;
- phdrs = (struct elf32_phdr *)(ehdr + 1);
+ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) {
/*
@@ -292,10 +332,22 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na
return false;
}
-static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, void *mem_region,
- phys_addr_t mem_phys, size_t mem_size,
- phys_addr_t *reloc_base, bool pas_init)
+/**
+ * qcom_mdt_load_no_init() - load the firmware whose header is loaded as fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
@@ -312,14 +364,17 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (!fw || !mem_region || !mem_phys || !mem_size)
return -EINVAL;
- is_split = qcom_mdt_bins_are_split(fw, fw_name);
+ if (!mdt_header_valid(fw))
+ return -EINVAL;
+
+ is_split = qcom_mdt_bins_are_split(fw);
ehdr = (struct elf32_hdr *)fw->data;
- phdrs = (struct elf32_phdr *)(ehdr + 1);
+ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
- if (!mdt_phdr_valid(phdr))
+ if (!mdt_phdr_loadable(phdr))
continue;
if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
@@ -346,7 +401,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
- if (!mdt_phdr_valid(phdr))
+ if (!mdt_phdr_loadable(phdr))
continue;
offset = phdr->p_paddr - mem_reloc;
@@ -392,12 +447,13 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
return ret;
}
+EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
/**
* qcom_mdt_load() - load the firmware whose header is loaded as fw
* @dev: device handle to associate resources with
* @fw: firmware object for the mdt file
- * @firmware: name of the firmware, for construction of segment file names
+ * @fw_name: name of the firmware, for construction of segment file names
* @pas_id: PAS identifier
* @mem_region: allocated memory region to load firmware into
* @mem_phys: physical address of allocated memory region
@@ -407,43 +463,20 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
* Returns 0 on success, negative errno otherwise.
*/
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *firmware, int pas_id, void *mem_region,
+ const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base)
{
int ret;
- ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL);
+ ret = qcom_mdt_pas_init(dev, fw, fw_name, pas_id, mem_phys, NULL);
if (ret)
return ret;
- return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
- mem_size, reloc_base, true);
+ return qcom_mdt_load_no_init(dev, fw, fw_name, mem_region, mem_phys,
+ mem_size, reloc_base);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load);
-/**
- * qcom_mdt_load_no_init() - load the firmware which header is loaded as fw
- * @dev: device handle to associate resources with
- * @fw: firmware object for the mdt file
- * @firmware: name of the firmware, for construction of segment file names
- * @pas_id: PAS identifier
- * @mem_region: allocated memory region to load firmware into
- * @mem_phys: physical address of allocated memory region
- * @mem_size: size of the allocated memory region
- * @reloc_base: adjusted physical address after relocation
- *
- * Returns 0 on success, negative errno otherwise.
- */
-int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
- const char *firmware, int pas_id,
- void *mem_region, phys_addr_t mem_phys,
- size_t mem_size, phys_addr_t *reloc_base)
-{
- return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
- mem_size, reloc_base, false);
-}
-EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
-
MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index 20f5461d46b9..71130a2f62e9 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -10,6 +10,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -185,38 +186,34 @@ static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
struct ocmem *of_get_ocmem(struct device *dev)
{
struct platform_device *pdev;
- struct device_node *devnode;
struct ocmem *ocmem;
- devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ struct device_node *devnode __free(device_node) = of_parse_phandle(dev->of_node,
+ "sram", 0);
if (!devnode || !devnode->parent) {
dev_err(dev, "Cannot look up sram phandle\n");
- of_node_put(devnode);
return ERR_PTR(-ENODEV);
}
pdev = of_find_device_by_node(devnode->parent);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", devnode->name);
- of_node_put(devnode);
return ERR_PTR(-EPROBE_DEFER);
}
- of_node_put(devnode);
ocmem = platform_get_drvdata(pdev);
+ put_device(&pdev->dev);
if (!ocmem) {
dev_err(dev, "Cannot get ocmem\n");
- put_device(&pdev->dev);
return ERR_PTR(-ENODEV);
}
return ocmem;
}
-EXPORT_SYMBOL(of_get_ocmem);
+EXPORT_SYMBOL_GPL(of_get_ocmem);
struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
unsigned long size)
{
- struct ocmem_buf *buf;
int ret;
/* TODO: add support for other clients... */
@@ -229,7 +226,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
return ERR_PTR(-EBUSY);
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ struct ocmem_buf *buf __free(kfree) = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_unlock;
@@ -247,7 +244,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
if (ret) {
dev_err(ocmem->dev, "could not lock: %d\n", ret);
ret = -EINVAL;
- goto err_kfree;
+ goto err_unlock;
}
} else {
ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
@@ -258,16 +255,14 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
size / 1024, buf->addr, client);
- return buf;
+ return_ptr(buf);
-err_kfree:
- kfree(buf);
err_unlock:
clear_bit_unlock(BIT(client), &ocmem->active_allocations);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ocmem_allocate);
+EXPORT_SYMBOL_GPL(ocmem_allocate);
void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
struct ocmem_buf *buf)
@@ -294,7 +289,7 @@ void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
clear_bit_unlock(BIT(client), &ocmem->active_allocations);
}
-EXPORT_SYMBOL(ocmem_free);
+EXPORT_SYMBOL_GPL(ocmem_free);
static int ocmem_dev_probe(struct platform_device *pdev)
{
@@ -416,14 +411,12 @@ err_clk_disable:
return ret;
}
-static int ocmem_dev_remove(struct platform_device *pdev)
+static void ocmem_dev_remove(struct platform_device *pdev)
{
struct ocmem *ocmem = platform_get_drvdata(pdev);
clk_disable_unprepare(ocmem->core_clk);
clk_disable_unprepare(ocmem->iface_clk);
-
- return 0;
}
static const struct ocmem_config ocmem_8226_config = {
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 0034af927b48..71be378d2e43 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -3,6 +3,7 @@
* Copyright (C) 2020 The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -74,24 +75,18 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
locator_hdl);
- struct pdr_service *pds;
+ mutex_lock(&pdr->lock);
/* Create a local client port for QMI communication */
pdr->locator_addr.sq_family = AF_QIPCRTR;
pdr->locator_addr.sq_node = svc->node;
pdr->locator_addr.sq_port = svc->port;
- mutex_lock(&pdr->lock);
pdr->locator_init_complete = true;
mutex_unlock(&pdr->lock);
/* Service pending lookup requests */
- mutex_lock(&pdr->list_lock);
- list_for_each_entry(pds, &pdr->lookups, node) {
- if (pds->need_locator_lookup)
- schedule_work(&pdr->locator_work);
- }
- mutex_unlock(&pdr->list_lock);
+ schedule_work(&pdr->locator_work);
return 0;
}
@@ -104,10 +99,10 @@ static void pdr_locator_del_server(struct qmi_handle *qmi,
mutex_lock(&pdr->lock);
pdr->locator_init_complete = false;
- mutex_unlock(&pdr->lock);
pdr->locator_addr.sq_node = 0;
pdr->locator_addr.sq_port = 0;
+ mutex_unlock(&pdr->lock);
}
static const struct qmi_ops pdr_locator_ops = {
@@ -365,12 +360,14 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
if (ret < 0)
return ret;
+ mutex_lock(&pdr->lock);
ret = qmi_send_request(&pdr->locator_hdl,
&pdr->locator_addr,
&txn, SERVREG_GET_DOMAIN_LIST_REQ,
SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
servreg_get_domain_list_req_ei,
req);
+ mutex_unlock(&pdr->lock);
if (ret < 0) {
qmi_txn_cancel(&txn);
return ret;
@@ -394,13 +391,13 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
{
- struct servreg_get_domain_list_resp *resp;
struct servreg_get_domain_list_req req;
struct servreg_location_entry *entry;
int domains_read = 0;
int ret, i;
- resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc(sizeof(*resp),
+ GFP_KERNEL);
if (!resp)
return -ENOMEM;
@@ -413,9 +410,9 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
req.domain_offset = domains_read;
ret = pdr_get_domain_list(&req, resp, pdr);
if (ret < 0)
- goto out;
+ return ret;
- for (i = domains_read; i < resp->domain_list_len; i++) {
+ for (i = 0; i < resp->domain_list_len; i++) {
entry = &resp->domain_list[i];
if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
@@ -425,7 +422,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
pds->service_data_valid = entry->service_data_valid;
pds->service_data = entry->service_data;
pds->instance = entry->instance;
- goto out;
+ return 0;
}
}
@@ -438,8 +435,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
domains_read += resp->domain_list_len;
} while (domains_read < resp->total_domains);
-out:
- kfree(resp);
+
return ret;
}
@@ -515,8 +511,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
const char *service_name,
const char *service_path)
{
- struct pdr_service *pds, *tmp;
- int ret;
+ struct pdr_service *tmp;
if (IS_ERR_OR_NULL(pdr))
return ERR_PTR(-EINVAL);
@@ -525,7 +520,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
!service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
return ERR_PTR(-EINVAL);
- pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ struct pdr_service *pds __free(kfree) = kzalloc(sizeof(*pds), GFP_KERNEL);
if (!pds)
return ERR_PTR(-ENOMEM);
@@ -540,8 +535,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
continue;
mutex_unlock(&pdr->list_lock);
- ret = -EALREADY;
- goto err;
+ return ERR_PTR(-EALREADY);
}
list_add(&pds->node, &pdr->lookups);
@@ -549,12 +543,9 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
schedule_work(&pdr->locator_work);
- return pds;
-err:
- kfree(pds);
- return ERR_PTR(ret);
+ return_ptr(pds);
}
-EXPORT_SYMBOL(pdr_add_lookup);
+EXPORT_SYMBOL_GPL(pdr_add_lookup);
/**
* pdr_restart_pd() - restart PD
@@ -634,7 +625,7 @@ int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
return 0;
}
-EXPORT_SYMBOL(pdr_restart_pd);
+EXPORT_SYMBOL_GPL(pdr_restart_pd);
/**
* pdr_handle_alloc() - initialize the PDR client handle
@@ -649,13 +640,12 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
char *service_path,
void *priv), void *priv)
{
- struct pdr_handle *pdr;
int ret;
if (!status)
return ERR_PTR(-EINVAL);
- pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
+ struct pdr_handle *pdr __free(kfree) = kzalloc(sizeof(*pdr), GFP_KERNEL);
if (!pdr)
return ERR_PTR(-ENOMEM);
@@ -674,10 +664,8 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
INIT_WORK(&pdr->indack_work, pdr_indack_work);
pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
- if (!pdr->notifier_wq) {
- ret = -ENOMEM;
- goto free_pdr_handle;
- }
+ if (!pdr->notifier_wq)
+ return ERR_PTR(-ENOMEM);
pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
if (!pdr->indack_wq) {
@@ -702,7 +690,7 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
if (ret < 0)
goto release_qmi_handle;
- return pdr;
+ return_ptr(pdr);
release_qmi_handle:
qmi_handle_release(&pdr->locator_hdl);
@@ -710,12 +698,10 @@ destroy_indack:
destroy_workqueue(pdr->indack_wq);
destroy_notifier:
destroy_workqueue(pdr->notifier_wq);
-free_pdr_handle:
- kfree(pdr);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(pdr_handle_alloc);
+EXPORT_SYMBOL_GPL(pdr_handle_alloc);
/**
* pdr_handle_release() - release the PDR client handle
@@ -749,7 +735,7 @@ void pdr_handle_release(struct pdr_handle *pdr)
kfree(pdr);
}
-EXPORT_SYMBOL(pdr_handle_release);
+EXPORT_SYMBOL_GPL(pdr_handle_release);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index 03c282b7f17e..039508c1bbf7 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -13,6 +13,8 @@
#define SERVREG_SET_ACK_REQ 0x23
#define SERVREG_RESTART_PD_REQ 0x24
+#define SERVREG_LOC_PFR_REQ 0x24
+
#define SERVREG_DOMAIN_LIST_LENGTH 32
#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
@@ -20,6 +22,7 @@
#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389
+#define SERVREG_LOC_PFR_RESP_MAX_LEN 10
struct servreg_location_entry {
char name[SERVREG_NAME_LENGTH + 1];
@@ -28,83 +31,12 @@ struct servreg_location_entry {
u32 instance;
};
-static const struct qmi_elem_info servreg_location_entry_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- name),
- },
- {
- .data_type = QMI_UNSIGNED_4_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- instance),
- },
- {
- .data_type = QMI_UNSIGNED_1_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- service_data_valid),
- },
- {
- .data_type = QMI_UNSIGNED_4_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0,
- .offset = offsetof(struct servreg_location_entry,
- service_data),
- },
- {}
-};
-
struct servreg_get_domain_list_req {
char service_name[SERVREG_NAME_LENGTH + 1];
u8 domain_offset_valid;
u32 domain_offset;
};
-static const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_get_domain_list_req,
- service_name),
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_req,
- domain_offset_valid),
- },
- {
- .data_type = QMI_UNSIGNED_4_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_req,
- domain_offset),
- },
- {}
-};
-
struct servreg_get_domain_list_resp {
struct qmi_response_type_v01 resp;
u8 total_domains_valid;
@@ -116,264 +48,59 @@ struct servreg_get_domain_list_resp {
struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
};
-static const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- total_domains_valid),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- total_domains),
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x11,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- db_rev_count_valid),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x11,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- db_rev_count),
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x12,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- domain_list_valid),
- },
- {
- .data_type = QMI_DATA_LEN,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x12,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- domain_list_len),
- },
- {
- .data_type = QMI_STRUCT,
- .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
- .elem_size = sizeof(struct servreg_location_entry),
- .array_type = VAR_LEN_ARRAY,
- .tlv_type = 0x12,
- .offset = offsetof(struct servreg_get_domain_list_resp,
- domain_list),
- .ei_array = servreg_location_entry_ei,
- },
- {}
-};
-
struct servreg_register_listener_req {
u8 enable;
char service_path[SERVREG_NAME_LENGTH + 1];
};
-static const struct qmi_elem_info servreg_register_listener_req_ei[] = {
- {
- .data_type = QMI_UNSIGNED_1_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_register_listener_req,
- enable),
- },
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_register_listener_req,
- service_path),
- },
- {}
-};
-
struct servreg_register_listener_resp {
struct qmi_response_type_v01 resp;
u8 curr_state_valid;
enum servreg_service_state curr_state;
};
-static const struct qmi_elem_info servreg_register_listener_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_register_listener_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(u8),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_register_listener_resp,
- curr_state_valid),
- },
- {
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
- .elem_len = 1,
- .elem_size = sizeof(enum servreg_service_state),
- .array_type = NO_ARRAY,
- .tlv_type = 0x10,
- .offset = offsetof(struct servreg_register_listener_resp,
- curr_state),
- },
- {}
-};
-
struct servreg_restart_pd_req {
char service_path[SERVREG_NAME_LENGTH + 1];
};
-static const struct qmi_elem_info servreg_restart_pd_req_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_restart_pd_req,
- service_path),
- },
- {}
-};
-
struct servreg_restart_pd_resp {
struct qmi_response_type_v01 resp;
};
-static const struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_restart_pd_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {}
-};
-
struct servreg_state_updated_ind {
enum servreg_service_state curr_state;
char service_path[SERVREG_NAME_LENGTH + 1];
u16 transaction_id;
};
-static const struct qmi_elem_info servreg_state_updated_ind_ei[] = {
- {
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_state_updated_ind,
- curr_state),
- },
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_state_updated_ind,
- service_path),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x03,
- .offset = offsetof(struct servreg_state_updated_ind,
- transaction_id),
- },
- {}
-};
-
struct servreg_set_ack_req {
char service_path[SERVREG_NAME_LENGTH + 1];
u16 transaction_id;
};
-static const struct qmi_elem_info servreg_set_ack_req_ei[] = {
- {
- .data_type = QMI_STRING,
- .elem_len = SERVREG_NAME_LENGTH + 1,
- .elem_size = sizeof(char),
- .array_type = NO_ARRAY,
- .tlv_type = 0x01,
- .offset = offsetof(struct servreg_set_ack_req,
- service_path),
- },
- {
- .data_type = QMI_UNSIGNED_2_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u16),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_set_ack_req,
- transaction_id),
- },
- {}
-};
-
struct servreg_set_ack_resp {
struct qmi_response_type_v01 resp;
};
-static const struct qmi_elem_info servreg_set_ack_resp_ei[] = {
- {
- .data_type = QMI_STRUCT,
- .elem_len = 1,
- .elem_size = sizeof(struct qmi_response_type_v01),
- .array_type = NO_ARRAY,
- .tlv_type = 0x02,
- .offset = offsetof(struct servreg_set_ack_resp,
- resp),
- .ei_array = qmi_response_type_v01_ei,
- },
- {}
+struct servreg_loc_pfr_req {
+ char service[SERVREG_NAME_LENGTH + 1];
+ char reason[257];
};
+struct servreg_loc_pfr_resp {
+ struct qmi_response_type_v01 rsp;
+};
+
+extern const struct qmi_elem_info servreg_get_domain_list_req_ei[];
+extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[];
+extern const struct qmi_elem_info servreg_register_listener_req_ei[];
+extern const struct qmi_elem_info servreg_register_listener_resp_ei[];
+extern const struct qmi_elem_info servreg_restart_pd_req_ei[];
+extern const struct qmi_elem_info servreg_restart_pd_resp_ei[];
+extern const struct qmi_elem_info servreg_state_updated_ind_ei[];
+extern const struct qmi_elem_info servreg_set_ack_req_ei[];
+extern const struct qmi_elem_info servreg_set_ack_resp_ei[];
+extern const struct qmi_elem_info servreg_loc_pfr_req_ei[];
+extern const struct qmi_elem_info servreg_loc_pfr_resp_ei[];
+
#endif
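
After this change pdr_internal.h only declares the QMI element-info tables; the definitions have to live in a single C file elsewhere in the series (not shown in this hunk). The split follows the usual declaration/definition pattern, here reconstructed from one of the deleted tables (the .c file name is a guess):

	/* pdr_internal.h */
	extern const struct qmi_elem_info servreg_restart_pd_req_ei[];

	/* e.g. pdr_internal.c (exact file not visible in this diff) */
	const struct qmi_elem_info servreg_restart_pd_req_ei[] = {
		{
			.data_type = QMI_STRING,
			.elem_len = SERVREG_NAME_LENGTH + 1,
			.elem_size = sizeof(char),
			.array_type = NO_ARRAY,
			.tlv_type = 0x01,
			.offset = offsetof(struct servreg_restart_pd_req, service_path),
		},
		{}
	};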
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 61c89ddfc75b..627f96ca322e 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -4,6 +4,8 @@
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -11,6 +13,9 @@
#include <linux/slab.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/soc/qcom/pmic_glink.h>
+#include <linux/spinlock.h>
+
+#define PMIC_GLINK_SEND_TIMEOUT (5 * HZ)
enum {
PMIC_GLINK_CLIENT_BATT = 0,
@@ -18,9 +23,6 @@ enum {
PMIC_GLINK_CLIENT_UCSI,
};
-#define PMIC_GLINK_CLIENT_DEFAULT (BIT(PMIC_GLINK_CLIENT_BATT) | \
- BIT(PMIC_GLINK_CLIENT_ALTMODE))
-
struct pmic_glink {
struct device *dev;
struct pdr_handle *pdr;
@@ -37,9 +39,10 @@ struct pmic_glink {
struct mutex state_lock;
unsigned int client_state;
unsigned int pdr_state;
+ bool pdr_available;
/* serializing clients list updates */
- struct mutex client_lock;
+ spinlock_t client_lock;
struct list_head clients;
};
@@ -61,17 +64,18 @@ static void _devm_pmic_glink_release_client(struct device *dev, void *res)
{
struct pmic_glink_client *client = (struct pmic_glink_client *)res;
struct pmic_glink *pg = client->pg;
+ unsigned long flags;
- mutex_lock(&pg->client_lock);
+ spin_lock_irqsave(&pg->client_lock, flags);
list_del(&client->node);
- mutex_unlock(&pg->client_lock);
+ spin_unlock_irqrestore(&pg->client_lock, flags);
}
-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
- unsigned int id,
- void (*cb)(const void *, size_t, void *),
- void (*pdr)(void *, int),
- void *priv)
+struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv)
{
struct pmic_glink_client *client;
struct pmic_glink *pg = dev_get_drvdata(dev->parent);
@@ -85,22 +89,57 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
client->cb = cb;
client->pdr_notify = pdr;
client->priv = priv;
-
- mutex_lock(&pg->client_lock);
- list_add(&client->node, &pg->clients);
- mutex_unlock(&pg->client_lock);
+ INIT_LIST_HEAD(&client->node);
devres_add(dev, client);
return client;
}
-EXPORT_SYMBOL_GPL(devm_pmic_glink_register_client);
+EXPORT_SYMBOL_GPL(devm_pmic_glink_client_alloc);
+
+void pmic_glink_client_register(struct pmic_glink_client *client)
+{
+ struct pmic_glink *pg = client->pg;
+ unsigned long flags;
+
+ guard(mutex)(&pg->state_lock);
+ spin_lock_irqsave(&pg->client_lock, flags);
+
+ list_add(&client->node, &pg->clients);
+ client->pdr_notify(client->priv, pg->client_state);
+
+ spin_unlock_irqrestore(&pg->client_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pmic_glink_client_register);
int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
{
struct pmic_glink *pg = client->pg;
+ bool timeout_reached = false;
+ unsigned long start;
+ int ret;
+
+ guard(mutex)(&pg->state_lock);
+ if (!pg->ept) {
+ return -ECONNRESET;
+ }
+
+ start = jiffies;
+ for (;;) {
+ ret = rpmsg_send(pg->ept, data, len);
+ if (ret != -EAGAIN)
+ break;
+
+ if (timeout_reached) {
+ ret = -ETIMEDOUT;
+ break;
+ }
- return rpmsg_send(pg->ept, data, len);
+ usleep_range(1000, 5000);
+ timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pmic_glink_send);
@@ -110,6 +149,7 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
struct pmic_glink_client *client;
struct pmic_glink_hdr *hdr;
struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev);
+ unsigned long flags;
if (len < sizeof(*hdr)) {
dev_warn(pg->dev, "ignoring truncated message\n");
@@ -118,15 +158,20 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
hdr = data;
+ spin_lock_irqsave(&pg->client_lock, flags);
list_for_each_entry(client, &pg->clients, node) {
if (client->id == le32_to_cpu(hdr->owner))
client->cb(data, len, client->priv);
}
+ spin_unlock_irqrestore(&pg->client_lock, flags);
return 0;
}
-static void pmic_glink_aux_release(struct device *dev) {}
+static void pmic_glink_aux_release(struct device *dev)
+{
+ of_node_put(dev->of_node);
+}
static int pmic_glink_add_aux_device(struct pmic_glink *pg,
struct auxiliary_device *aux,
@@ -140,8 +185,10 @@ static int pmic_glink_add_aux_device(struct pmic_glink *pg,
aux->dev.release = pmic_glink_aux_release;
device_set_of_node_from_dev(&aux->dev, parent);
ret = auxiliary_device_init(aux);
- if (ret)
+ if (ret) {
+ of_node_put(aux->dev.of_node);
return ret;
+ }
ret = auxiliary_device_add(aux);
if (ret)
@@ -161,18 +208,21 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
{
struct pmic_glink_client *client;
unsigned int new_state = pg->client_state;
+ unsigned long flags;
if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
new_state = SERVREG_SERVICE_STATE_UP;
} else {
- if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
+ if (pg->pdr_state == SERVREG_SERVICE_STATE_DOWN || !pg->ept)
new_state = SERVREG_SERVICE_STATE_DOWN;
}
if (new_state != pg->client_state) {
+ spin_lock_irqsave(&pg->client_lock, flags);
list_for_each_entry(client, &pg->clients, node)
client->pdr_notify(client->priv, new_state);
+ spin_unlock_irqrestore(&pg->client_lock, flags);
pg->client_state = new_state;
}
}
@@ -181,55 +231,52 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
{
struct pmic_glink *pg = priv;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->pdr_state = state;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
}
static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
{
- struct pmic_glink *pg = __pmic_glink;
- int ret = 0;
+ struct pmic_glink *pg;
- mutex_lock(&__pmic_glink_lock);
- if (!pg) {
- ret = dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
- goto out_unlock;
- }
+ guard(mutex)(&__pmic_glink_lock);
+ pg = __pmic_glink;
+ if (!pg)
+ return dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
dev_set_drvdata(&rpdev->dev, pg);
+ pg->pdr_available = rpdev->id.driver_data;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = rpdev->ept;
+ if (!pg->pdr_available)
+ pg->pdr_state = SERVREG_SERVICE_STATE_UP;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
- return ret;
+ return 0;
}
static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct pmic_glink *pg;
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
pg = __pmic_glink;
if (!pg)
- goto out_unlock;
+ return;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = NULL;
+ if (!pg->pdr_available)
+ pg->pdr_state = SERVREG_SERVICE_STATE_DOWN;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
}
static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
- { "PMIC_RTR_ADSP_APPS" },
+ {.name = "PMIC_RTR_ADSP_APPS", .driver_data = true },
+ {.name = "PMIC_RTR_SOCCP_APPS", .driver_data = false },
{}
};
@@ -259,19 +306,26 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->dev = &pdev->dev;
INIT_LIST_HEAD(&pg->clients);
- mutex_init(&pg->client_lock);
+ spin_lock_init(&pg->client_lock);
mutex_init(&pg->state_lock);
match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
- if (match_data)
- pg->client_mask = *match_data;
- else
- pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT;
+ if (!match_data)
+ return -EINVAL;
+
+ pg->client_mask = *match_data;
+
+ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+ if (IS_ERR(pg->pdr)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
+ "failed to initialize pdr\n");
+ return ret;
+ }
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
if (ret)
- return ret;
+ goto out_release_pdr_handle;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@@ -284,17 +338,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
goto out_release_altmode_aux;
}
- pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
- if (IS_ERR(pg->pdr)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
- goto out_release_aux_devices;
- }
-
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
if (IS_ERR(service)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
"failed adding pdr lookup for charger_pd\n");
- goto out_release_pdr_handle;
+ goto out_release_aux_devices;
}
mutex_lock(&__pmic_glink_lock);
@@ -303,8 +351,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
return 0;
-out_release_pdr_handle:
- pdr_handle_release(pg->pdr);
out_release_aux_devices:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
pmic_glink_del_aux_device(pg, &pg->ps_aux);
@@ -314,11 +360,13 @@ out_release_altmode_aux:
out_release_ucsi_aux:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+out_release_pdr_handle:
+ pdr_handle_release(pg->pdr);
return ret;
}
-static int pmic_glink_remove(struct platform_device *pdev)
+static void pmic_glink_remove(struct platform_device *pdev)
{
struct pmic_glink *pg = dev_get_drvdata(&pdev->dev);
@@ -331,11 +379,8 @@ static int pmic_glink_remove(struct platform_device *pdev)
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
__pmic_glink = NULL;
- mutex_unlock(&__pmic_glink_lock);
-
- return 0;
}
static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
@@ -343,9 +388,7 @@ static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT
BIT(PMIC_GLINK_CLIENT_UCSI);
static const struct of_device_id pmic_glink_of_match[] = {
- { .compatible = "qcom,sm8450-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
- { .compatible = "qcom,sm8550-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
- { .compatible = "qcom,pmic-glink" },
+ { .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask },
{}
};
MODULE_DEVICE_TABLE(of, pmic_glink_of_match);
@@ -361,18 +404,27 @@ static struct platform_driver pmic_glink_driver = {
static int pmic_glink_init(void)
{
- platform_driver_register(&pmic_glink_driver);
- register_rpmsg_driver(&pmic_glink_rpmsg_driver);
+ int ret;
+
+ ret = platform_driver_register(&pmic_glink_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = register_rpmsg_driver(&pmic_glink_rpmsg_driver);
+ if (ret < 0) {
+ platform_driver_unregister(&pmic_glink_driver);
+ return ret;
+ }
return 0;
-};
+}
module_init(pmic_glink_init);
static void pmic_glink_exit(void)
{
unregister_rpmsg_driver(&pmic_glink_rpmsg_driver);
platform_driver_unregister(&pmic_glink_driver);
-};
+}
module_exit(pmic_glink_exit);
MODULE_DESCRIPTION("Qualcomm PMIC GLINK driver");
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index d05e0d6edf49..7f11acd33323 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -5,13 +5,14 @@
*/
#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
@@ -20,7 +21,7 @@
#include <linux/soc/qcom/pmic_glink.h>
-#define PMIC_GLINK_MAX_PORTS 2
+#define PMIC_GLINK_MAX_PORTS 3
#define USBC_SC8180X_NOTIFY_IND 0x13
#define USBC_CMD_WRITE_REQ 0x15
@@ -76,7 +77,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct drm_bridge bridge;
+ struct auxiliary_device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -114,7 +115,7 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
* The USBC_CMD_WRITE_REQ ack doesn't identify the request, so wait for
* one ack at a time.
*/
- mutex_lock(&altmode->lock);
+ guard(mutex)(&altmode->lock);
req.hdr.owner = cpu_to_le32(altmode->owner_id);
req.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP);
@@ -125,18 +126,16 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
ret = pmic_glink_send(altmode->client, &req, sizeof(req));
if (ret) {
dev_err(altmode->dev, "failed to send altmode request: %#x (%d)\n", cmd, ret);
- goto out_unlock;
+ return ret;
}
left = wait_for_completion_timeout(&altmode->pan_ack, 5 * HZ);
if (!left) {
dev_err(altmode->dev, "timeout waiting for altmode request ack for: %#x\n", cmd);
- ret = -ETIMEDOUT;
+ return -ETIMEDOUT;
}
-out_unlock:
- mutex_unlock(&altmode->lock);
- return ret;
+ return 0;
}
static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
@@ -160,7 +159,7 @@ static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
ret = typec_mux_set(port->typec_mux, &port->state);
if (ret)
- dev_err(altmode->dev, "failed to switch mux to DP\n");
+ dev_err(altmode->dev, "failed to switch mux to DP: %d\n", ret);
port->retimer_state.alt = &port->dp_alt;
port->retimer_state.data = &dp_data;
@@ -168,7 +167,7 @@ static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
ret = typec_retimer_set(port->typec_retimer, &port->retimer_state);
if (ret)
- dev_err(altmode->dev, "failed to setup retimer to DP\n");
+ dev_err(altmode->dev, "failed to setup retimer to DP: %d\n", ret);
}
static void pmic_glink_altmode_enable_usb(struct pmic_glink_altmode *altmode,
@@ -182,7 +181,7 @@ static void pmic_glink_altmode_enable_usb(struct pmic_glink_altmode *altmode,
ret = typec_mux_set(port->typec_mux, &port->state);
if (ret)
- dev_err(altmode->dev, "failed to switch mux to USB\n");
+ dev_err(altmode->dev, "failed to switch mux to USB: %d\n", ret);
port->retimer_state.alt = NULL;
port->retimer_state.data = NULL;
@@ -190,7 +189,7 @@ static void pmic_glink_altmode_enable_usb(struct pmic_glink_altmode *altmode,
ret = typec_retimer_set(port->typec_retimer, &port->retimer_state);
if (ret)
- dev_err(altmode->dev, "failed to setup retimer to USB\n");
+ dev_err(altmode->dev, "failed to setup retimer to USB: %d\n", ret);
}
static void pmic_glink_altmode_safe(struct pmic_glink_altmode *altmode,
@@ -204,7 +203,7 @@ static void pmic_glink_altmode_safe(struct pmic_glink_altmode *altmode,
ret = typec_mux_set(port->typec_mux, &port->state);
if (ret)
- dev_err(altmode->dev, "failed to switch mux to safe mode\n");
+ dev_err(altmode->dev, "failed to switch mux to safe mode: %d\n", ret);
port->retimer_state.alt = NULL;
port->retimer_state.data = NULL;
@@ -212,31 +211,39 @@ static void pmic_glink_altmode_safe(struct pmic_glink_altmode *altmode,
ret = typec_retimer_set(port->typec_retimer, &port->retimer_state);
if (ret)
- dev_err(altmode->dev, "failed to setup retimer to USB\n");
+ dev_err(altmode->dev, "failed to setup retimer to USB: %d\n", ret);
}
static void pmic_glink_altmode_worker(struct work_struct *work)
{
struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work);
struct pmic_glink_altmode *altmode = alt_port->altmode;
+ enum drm_connector_status conn_status;
typec_switch_set(alt_port->typec_switch, alt_port->orientation);
- if (alt_port->svid == USB_TYPEC_DP_SID && alt_port->mode == 0xff)
- pmic_glink_altmode_safe(altmode, alt_port);
- else if (alt_port->svid == USB_TYPEC_DP_SID)
- pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode,
- alt_port->hpd_state, alt_port->hpd_irq);
- else
- pmic_glink_altmode_enable_usb(altmode, alt_port);
+ if (alt_port->svid == USB_TYPEC_DP_SID) {
+ if (alt_port->mode == 0xff) {
+ pmic_glink_altmode_safe(altmode, alt_port);
+ } else {
+ pmic_glink_altmode_enable_dp(altmode, alt_port,
+ alt_port->mode,
+ alt_port->hpd_state,
+ alt_port->hpd_irq);
+ }
- if (alt_port->hpd_state)
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected);
- else
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected);
+ if (alt_port->hpd_state)
+ conn_status = connector_status_connected;
+ else
+ conn_status = connector_status_disconnected;
+
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, conn_status);
+ } else {
+ pmic_glink_altmode_enable_usb(altmode, alt_port);
+ }
pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
-};
+}
static enum typec_orientation pmic_glink_altmode_orientation(unsigned int orientation)
{
@@ -285,7 +292,7 @@ static void pmic_glink_altmode_sc8180xp_notify(struct pmic_glink_altmode *altmod
svid = mux == 2 ? USB_TYPEC_DP_SID : 0;
- if (!altmode->ports[port].altmode) {
+ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
return;
}
@@ -328,7 +335,7 @@ static void pmic_glink_altmode_sc8280xp_notify(struct pmic_glink_altmode *altmod
hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]);
hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]);
- if (!altmode->ports[port].altmode) {
+ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
return;
}
@@ -365,16 +372,6 @@ static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv
}
}
-static int pmic_glink_altmode_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = {
- .attach = pmic_glink_altmode_attach,
-};
-
static void pmic_glink_altmode_put_retimer(void *data)
{
typec_retimer_put(data);
@@ -397,7 +394,7 @@ static void pmic_glink_altmode_enable_worker(struct work_struct *work)
ret = pmic_glink_altmode_request(altmode, ALTMODE_PAN_EN, 0);
if (ret)
- dev_err(altmode->dev, "failed to request altmode notifications\n");
+ dev_err(altmode->dev, "failed to request altmode notifications: %d\n", ret);
}
static void pmic_glink_altmode_pdr_notify(void *priv, int state)
@@ -444,6 +441,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
ret = fwnode_property_read_u32(fwnode, "reg", &port);
if (ret < 0) {
dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ fwnode_handle_put(fwnode);
return ret;
}
@@ -454,6 +452,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
if (altmode->ports[port].altmode) {
dev_err(dev, "multiple connector definition for port %u\n", port);
+ fwnode_handle_put(fwnode);
return -EINVAL;
}
@@ -462,59 +461,83 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
- alt_port->bridge.of_node = to_of_node(fwnode);
- alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
- alt_port->bridge.type = DRM_MODE_CONNECTOR_USB;
-
- ret = devm_drm_bridge_add(dev, &alt_port->bridge);
- if (ret)
- return ret;
+ alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
+ if (IS_ERR(alt_port->bridge)) {
+ fwnode_handle_put(fwnode);
+ return PTR_ERR(alt_port->bridge);
+ }
alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
alt_port->dp_alt.active = 1;
alt_port->typec_mux = fwnode_typec_mux_get(fwnode);
- if (IS_ERR(alt_port->typec_mux))
+ if (IS_ERR(alt_port->typec_mux)) {
+ fwnode_handle_put(fwnode);
return dev_err_probe(dev, PTR_ERR(alt_port->typec_mux),
"failed to acquire mode-switch for port: %d\n",
port);
+ }
ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_mux,
alt_port->typec_mux);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
return ret;
+ }
alt_port->typec_retimer = fwnode_typec_retimer_get(fwnode);
- if (IS_ERR(alt_port->typec_retimer))
+ if (IS_ERR(alt_port->typec_retimer)) {
+ fwnode_handle_put(fwnode);
return dev_err_probe(dev, PTR_ERR(alt_port->typec_retimer),
"failed to acquire retimer-switch for port: %d\n",
port);
+ }
ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_retimer,
alt_port->typec_retimer);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
return ret;
+ }
alt_port->typec_switch = fwnode_typec_switch_get(fwnode);
- if (IS_ERR(alt_port->typec_switch))
+ if (IS_ERR(alt_port->typec_switch)) {
+ fwnode_handle_put(fwnode);
return dev_err_probe(dev, PTR_ERR(alt_port->typec_switch),
"failed to acquire orientation-switch for port: %d\n",
port);
+ }
ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_switch,
alt_port->typec_switch);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+ }
+
+ for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
+ alt_port = &altmode->ports[port];
+ if (!alt_port->bridge)
+ continue;
+
+ ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
if (ret)
return ret;
}
- altmode->client = devm_pmic_glink_register_client(dev,
- altmode->owner_id,
- pmic_glink_altmode_callback,
- pmic_glink_altmode_pdr_notify,
- altmode);
- return PTR_ERR_OR_ZERO(altmode->client);
+ altmode->client = devm_pmic_glink_client_alloc(dev,
+ altmode->owner_id,
+ pmic_glink_altmode_callback,
+ pmic_glink_altmode_pdr_notify,
+ altmode);
+ if (IS_ERR(altmode->client))
+ return PTR_ERR(altmode->client);
+
+ pmic_glink_client_register(altmode->client);
+
+ return 0;
}
static const struct auxiliary_device_id pmic_glink_altmode_id_table[] = {
diff --git a/drivers/soc/qcom/pmic_pdcharger_ulog.c b/drivers/soc/qcom/pmic_pdcharger_ulog.c
new file mode 100644
index 000000000000..39f412bbf2c1
--- /dev/null
+++ b/drivers/soc/qcom/pmic_pdcharger_ulog.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd
+ */
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/pdr.h>
+#include <linux/debugfs.h>
+
+#define CREATE_TRACE_POINTS
+#include "pmic_pdcharger_ulog.h"
+
+#define MSG_OWNER_CHG_ULOG 32778
+#define MSG_TYPE_REQ_RESP 1
+
+#define GET_CHG_ULOG_REQ 0x18
+#define SET_CHG_ULOG_PROP_REQ 0x19
+
+#define LOG_DEFAULT_TIME_MS 1000
+
+#define MAX_ULOG_SIZE 8192
+
+struct pmic_pdcharger_ulog_hdr {
+ __le32 owner;
+ __le32 type;
+ __le32 opcode;
+};
+
+struct pmic_pdcharger_ulog {
+ struct rpmsg_device *rpdev;
+ struct delayed_work ulog_work;
+};
+
+struct get_ulog_req_msg {
+ struct pmic_pdcharger_ulog_hdr hdr;
+ u32 log_size;
+};
+
+struct get_ulog_resp_msg {
+ struct pmic_pdcharger_ulog_hdr hdr;
+ u8 buf[MAX_ULOG_SIZE];
+};
+
+static int pmic_pdcharger_ulog_write_async(struct pmic_pdcharger_ulog *pg, void *data, size_t len)
+{
+ return rpmsg_send(pg->rpdev->ept, data, len);
+}
+
+static int pmic_pdcharger_ulog_request(struct pmic_pdcharger_ulog *pg)
+{
+ struct get_ulog_req_msg req_msg = {
+ .hdr = {
+ .owner = cpu_to_le32(MSG_OWNER_CHG_ULOG),
+ .type = cpu_to_le32(MSG_TYPE_REQ_RESP),
+ .opcode = cpu_to_le32(GET_CHG_ULOG_REQ)
+ },
+ .log_size = MAX_ULOG_SIZE
+ };
+
+ return pmic_pdcharger_ulog_write_async(pg, &req_msg, sizeof(req_msg));
+}
+
+static void pmic_pdcharger_ulog_work(struct work_struct *work)
+{
+ struct pmic_pdcharger_ulog *pg = container_of(work, struct pmic_pdcharger_ulog,
+ ulog_work.work);
+ int rc;
+
+ rc = pmic_pdcharger_ulog_request(pg);
+ if (rc) {
+ dev_err(&pg->rpdev->dev, "Error requesting ulog, rc=%d\n", rc);
+ return;
+ }
+}
+
+static void pmic_pdcharger_ulog_handle_message(struct pmic_pdcharger_ulog *pg,
+ struct get_ulog_resp_msg *resp_msg,
+ size_t len)
+{
+ char *token, *buf = resp_msg->buf;
+
+ if (len != sizeof(*resp_msg)) {
+ dev_err(&pg->rpdev->dev, "Expected data length: %zu, received: %zu\n",
+ sizeof(*resp_msg), len);
+ return;
+ }
+
+ buf[MAX_ULOG_SIZE - 1] = '\0';
+
+ do {
+ token = strsep((char **)&buf, "\n");
+ if (token && strlen(token))
+ trace_pmic_pdcharger_ulog_msg(token);
+ } while (token);
+}
+
+static int pmic_pdcharger_ulog_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 addr)
+{
+ struct pmic_pdcharger_ulog *pg = dev_get_drvdata(&rpdev->dev);
+ struct pmic_pdcharger_ulog_hdr *hdr = data;
+ u32 opcode;
+
+ opcode = le32_to_cpu(hdr->opcode);
+
+ switch (opcode) {
+ case GET_CHG_ULOG_REQ:
+ schedule_delayed_work(&pg->ulog_work, msecs_to_jiffies(LOG_DEFAULT_TIME_MS));
+ pmic_pdcharger_ulog_handle_message(pg, data, len);
+ break;
+ default:
+ dev_err(&pg->rpdev->dev, "Unknown opcode %u\n", opcode);
+ break;
+ }
+
+ return 0;
+}
+
+static int pmic_pdcharger_ulog_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct pmic_pdcharger_ulog *pg;
+ struct device *dev = &rpdev->dev;
+
+ pg = devm_kzalloc(dev, sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ pg->rpdev = rpdev;
+ INIT_DELAYED_WORK(&pg->ulog_work, pmic_pdcharger_ulog_work);
+
+ dev_set_drvdata(dev, pg);
+
+ pmic_pdcharger_ulog_request(pg);
+
+ return 0;
+}
+
+static void pmic_pdcharger_ulog_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct pmic_pdcharger_ulog *pg = dev_get_drvdata(&rpdev->dev);
+
+ cancel_delayed_work_sync(&pg->ulog_work);
+}
+
+static const struct rpmsg_device_id pmic_pdcharger_ulog_rpmsg_id_match[] = {
+ { "PMIC_LOGS_ADSP_APPS" },
+ {}
+};
+/*
+ * No MODULE_DEVICE_TABLE intentionally: this is a debugging module, meant
+ * to be loaded manually only.
+ */
+
+static struct rpmsg_driver pmic_pdcharger_ulog_rpmsg_driver = {
+ .probe = pmic_pdcharger_ulog_rpmsg_probe,
+ .remove = pmic_pdcharger_ulog_rpmsg_remove,
+ .callback = pmic_pdcharger_ulog_rpmsg_callback,
+ .id_table = pmic_pdcharger_ulog_rpmsg_id_match,
+ .drv = {
+ .name = "qcom_pmic_pdcharger_ulog_rpmsg",
+ },
+};
+
+module_rpmsg_driver(pmic_pdcharger_ulog_rpmsg_driver);
+MODULE_DESCRIPTION("Qualcomm PMIC ChargerPD ULOG driver");
+MODULE_LICENSE("GPL");
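The handler above tokenises the returned 8 KiB buffer line by line with strsep(); a standalone sketch of that pattern (demo_emit_lines() and the emit callback are hypothetical, standing in for trace_pmic_pdcharger_ulog_msg()):

#include <linux/string.h>

/* Emit every non-empty, newline-terminated token of a NUL-terminated buffer. */
static void demo_emit_lines(char *buf, void (*emit)(const char *line))
{
	char *token;

	/* strsep() overwrites each '\n' with '\0' and advances buf. */
	while ((token = strsep(&buf, "\n")) != NULL) {
		if (*token)
			emit(token);
	}
}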
diff --git a/drivers/soc/qcom/pmic_pdcharger_ulog.h b/drivers/soc/qcom/pmic_pdcharger_ulog.h
new file mode 100644
index 000000000000..1cfa58f0e34c
--- /dev/null
+++ b/drivers/soc/qcom/pmic_pdcharger_ulog.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Ltd
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pmic_pdcharger_ulog
+
+#if !defined(_TRACE_PMIC_PDCHARGER_ULOG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PMIC_PDCHARGER_ULOG_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(pmic_pdcharger_ulog_msg,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(msg);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+#endif /* _TRACE_PMIC_PDCHARGER_ULOG_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE pmic_pdcharger_ulog
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index ba788762835f..cd1779b6a91a 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
#define __DISABLE_TRACE_MMIO__
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -89,7 +94,6 @@
* @base: Base address of this instance of QUP wrapper core
* @clks: Handle to the primary & optional secondary AHB clocks
* @num_clks: Count of clocks
- * @to_core: Core ICC path
*/
struct geni_wrapper {
struct device *dev;
@@ -111,22 +115,94 @@ struct geni_se_desc {
static const char * const icc_path_names[] = {"qup-core", "qup-config",
"qup-memory"};
-#define QUP_HW_VER_REG 0x4
+static const char * const protocol_name[] = { "None", "SPI", "UART", "I2C", "I3C", "SPI SLAVE" };
+
+/**
+ * struct se_fw_hdr - Serial Engine firmware configuration header
+ *
+ * This structure defines the SE firmware header, which together with the
+ * firmware payload is stored in individual ELF segments.
+ *
+ * @magic: Set to 'SEFW'.
+ * @version: Structure version number.
+ * @core_version: QUPV3 hardware version.
+ * @serial_protocol: Encoded in GENI_FW_REVISION.
+ * @fw_version: Firmware version, from GENI_FW_REVISION.
+ * @cfg_version: Configuration version, from GENI_INIT_CFG_REVISION.
+ * @fw_size_in_items: Number of 32-bit words in GENI_FW_RAM.
+ * @fw_offset: Byte offset to GENI_FW_RAM array.
+ * @cfg_size_in_items: Number of GENI_FW_CFG index/value pairs.
+ * @cfg_idx_offset: Byte offset to GENI_FW_CFG index array.
+ * @cfg_val_offset: Byte offset to GENI_FW_CFG values array.
+ */
+struct se_fw_hdr {
+ __le32 magic;
+ __le32 version;
+ __le32 core_version;
+ __le16 serial_protocol;
+ __le16 fw_version;
+ __le16 cfg_version;
+ __le16 fw_size_in_items;
+ __le16 fw_offset;
+ __le16 cfg_size_in_items;
+ __le16 cfg_idx_offset;
+ __le16 cfg_val_offset;
+};
+
+/* Magic numbers */
+#define SE_MAGIC_NUM 0x57464553
+
+#define MAX_GENI_CFG_RAMn_CNT 455
+
+#define MI_PBT_NON_PAGED_SEGMENT 0x0
+#define MI_PBT_HASH_SEGMENT 0x2
+#define MI_PBT_NOTUSED_SEGMENT 0x3
+#define MI_PBT_SHARED_SEGMENT 0x4
+
+#define MI_PBT_FLAG_PAGE_MODE BIT(20)
+#define MI_PBT_FLAG_SEGMENT_TYPE GENMASK(26, 24)
+#define MI_PBT_FLAG_ACCESS_TYPE GENMASK(23, 21)
+
+#define MI_PBT_PAGE_MODE_VALUE(x) FIELD_GET(MI_PBT_FLAG_PAGE_MODE, x)
+
+#define MI_PBT_SEGMENT_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_SEGMENT_TYPE, x)
+
+#define MI_PBT_ACCESS_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_ACCESS_TYPE, x)
+
+#define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \
+ M_IO_DATA_DEASSERT_EN | \
+ M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \
+ M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \
+ M_TX_FIFO_WR_ERR_EN)
+
+/* Common QUPV3 registers */
+#define QUPV3_HW_VER_REG 0x4
+#define QUPV3_SE_AHB_M_CFG 0x118
+#define QUPV3_COMMON_CFG 0x120
+#define QUPV3_COMMON_CGC_CTRL 0x21c
+
+/* QUPV3_COMMON_CFG fields */
+#define FAST_SWITCH_TO_HIGH_DISABLE BIT(0)
+
+/* QUPV3_SE_AHB_M_CFG fields */
+#define AHB_M_CLK_CGC_ON BIT(0)
+
+/* QUPV3_COMMON_CGC_CTRL fields */
+#define COMMON_CSR_SLV_CLK_CGC_ON BIT(0)
/* Common SE registers */
-#define GENI_INIT_CFG_REVISION 0x0
-#define GENI_S_INIT_CFG_REVISION 0x4
-#define GENI_OUTPUT_CTRL 0x24
-#define GENI_CGC_CTRL 0x28
-#define GENI_CLK_CTRL_RO 0x60
-#define GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_INIT_CFG_REVISION 0x0
+#define SE_GENI_S_INIT_CFG_REVISION 0x4
+#define SE_GENI_CGC_CTRL 0x28
+#define SE_GENI_CLK_CTRL_RO 0x60
+#define SE_GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_CFG_REG0 0x100
#define SE_GENI_BYTE_GRAN 0x254
#define SE_GENI_TX_PACKING_CFG0 0x260
#define SE_GENI_TX_PACKING_CFG1 0x264
#define SE_GENI_RX_PACKING_CFG0 0x284
#define SE_GENI_RX_PACKING_CFG1 0x288
-#define SE_GENI_M_GP_LENGTH 0x910
-#define SE_GENI_S_GP_LENGTH 0x914
+#define SE_GENI_S_IRQ_ENABLE 0x644
#define SE_DMA_TX_PTR_L 0xc30
#define SE_DMA_TX_PTR_H 0xc34
#define SE_DMA_TX_ATTR 0xc38
@@ -143,12 +219,20 @@ static const char * const icc_path_names[] = {"qup-core", "qup-config",
#define SE_DMA_RX_IRQ_EN 0xd48
#define SE_DMA_RX_IRQ_EN_SET 0xd4c
#define SE_DMA_RX_IRQ_EN_CLR 0xd50
-#define SE_DMA_RX_LEN_IN 0xd54
#define SE_DMA_RX_MAX_BURST 0xd5c
#define SE_DMA_RX_FLUSH 0xd60
#define SE_GSI_EVENT_EN 0xe18
#define SE_IRQ_EN 0xe1c
#define SE_DMA_GENERAL_CFG 0xe30
+#define SE_GENI_FW_REVISION 0x1000
+#define SE_GENI_S_FW_REVISION 0x1004
+#define SE_GENI_CFG_RAMN 0x1010
+#define SE_GENI_CLK_CTRL 0x2000
+#define SE_DMA_IF_EN 0x2004
+#define SE_FIFO_IF_DISABLE 0x2008
+
+/* GENI_FW_REVISION_RO fields */
+#define FW_REV_VERSION_MSK GENMASK(7, 0)
/* GENI_OUTPUT_CTRL fields */
#define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0)
@@ -180,13 +264,22 @@ static const char * const icc_path_names[] = {"qup-core", "qup-config",
/* SE_DMA_GENERAL_CFG */
#define DMA_RX_CLK_CGC_ON BIT(0)
#define DMA_TX_CLK_CGC_ON BIT(1)
-#define DMA_AHB_SLV_CFG_ON BIT(2)
+#define DMA_AHB_SLV_CLK_CGC_ON BIT(2)
#define AHB_SEC_SLV_CLK_CGC_ON BIT(3)
#define DUMMY_RX_NON_BUFFERABLE BIT(4)
#define RX_DMA_ZERO_PADDING_EN BIT(5)
#define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6)
#define RX_DMA_IRQ_DELAY_SHFT 6
+/* GENI_CLK_CTRL fields */
+#define SER_CLK_SEL BIT(0)
+
+/* GENI_DMA_IF_EN fields */
+#define DMA_IF_EN BIT(0)
+
+#define geni_setbits32(_addr, _v) writel(readl(_addr) | (_v), _addr)
+#define geni_clrbits32(_addr, _v) writel(readl(_addr) & ~(_v), _addr)
+
/**
* geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version
* @se: Pointer to the corresponding serial engine.
@@ -197,9 +290,9 @@ u32 geni_se_get_qup_hw_version(struct geni_se *se)
{
struct geni_wrapper *wrapper = se->wrapper;
- return readl_relaxed(wrapper->base + QUP_HW_VER_REG);
+ return readl_relaxed(wrapper->base + QUPV3_HW_VER_REG);
}
-EXPORT_SYMBOL(geni_se_get_qup_hw_version);
+EXPORT_SYMBOL_GPL(geni_se_get_qup_hw_version);
static void geni_se_io_set_mode(void __iomem *base)
{
@@ -221,12 +314,12 @@ static void geni_se_io_init(void __iomem *base)
{
u32 val;
- val = readl_relaxed(base + GENI_CGC_CTRL);
+ val = readl_relaxed(base + SE_GENI_CGC_CTRL);
val |= DEFAULT_CGC_EN;
- writel_relaxed(val, base + GENI_CGC_CTRL);
+ writel_relaxed(val, base + SE_GENI_CGC_CTRL);
val = readl_relaxed(base + SE_DMA_GENERAL_CFG);
- val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON;
+ val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON;
val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
writel_relaxed(val, base + SE_DMA_GENERAL_CFG);
@@ -272,7 +365,7 @@ void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr)
val |= S_COMMON_GENI_S_IRQ_EN;
writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
}
-EXPORT_SYMBOL(geni_se_init);
+EXPORT_SYMBOL_GPL(geni_se_init);
static void geni_se_select_fifo_mode(struct geni_se *se)
{
@@ -364,7 +457,7 @@ void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
break;
}
}
-EXPORT_SYMBOL(geni_se_select_mode);
+EXPORT_SYMBOL_GPL(geni_se_select_mode);
/**
* DOC: Overview
@@ -481,7 +574,7 @@ void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
if (pack_words || bpw == 32)
writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN);
}
-EXPORT_SYMBOL(geni_se_config_packing);
+EXPORT_SYMBOL_GPL(geni_se_config_packing);
static void geni_se_clks_off(struct geni_se *se)
{
@@ -512,7 +605,7 @@ int geni_se_resources_off(struct geni_se *se)
geni_se_clks_off(se);
return 0;
}
-EXPORT_SYMBOL(geni_se_resources_off);
+EXPORT_SYMBOL_GPL(geni_se_resources_off);
static int geni_se_clks_on(struct geni_se *se)
{
@@ -553,7 +646,7 @@ int geni_se_resources_on(struct geni_se *se)
return ret;
}
-EXPORT_SYMBOL(geni_se_resources_on);
+EXPORT_SYMBOL_GPL(geni_se_resources_on);
/**
* geni_se_clk_tbl_get() - Get the clock table to program DFS
@@ -586,7 +679,8 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
freq = clk_round_rate(se->clk, freq + 1);
- if (freq <= 0 || freq == se->clk_perf_tbl[i - 1])
+ if (freq <= 0 ||
+ (i > 0 && freq == se->clk_perf_tbl[i - 1]))
break;
se->clk_perf_tbl[i] = freq;
}
@@ -594,7 +688,7 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
*tbl = se->clk_perf_tbl;
return se->num_clk_levels;
}
-EXPORT_SYMBOL(geni_se_clk_tbl_get);
+EXPORT_SYMBOL_GPL(geni_se_clk_tbl_get);
/**
* geni_se_clk_freq_match() - Get the matching or closest SE clock frequency
@@ -656,11 +750,14 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
return 0;
}
-EXPORT_SYMBOL(geni_se_clk_freq_match);
+EXPORT_SYMBOL_GPL(geni_se_clk_freq_match);
+
+#define GENI_SE_DMA_DONE_EN BIT(0)
+#define GENI_SE_DMA_EOT_EN BIT(1)
+#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_RESET_DONE_EN BIT(3)
+#define GENI_SE_DMA_FLUSH_DONE BIT(4)
-#define GENI_SE_DMA_DONE_EN BIT(0)
-#define GENI_SE_DMA_EOT_EN BIT(1)
-#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
#define GENI_SE_DMA_EOT_BUF BIT(0)
/**
@@ -684,7 +781,7 @@ void geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len)
writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
writel(len, se->base + SE_DMA_TX_LEN);
}
-EXPORT_SYMBOL(geni_se_tx_init_dma);
+EXPORT_SYMBOL_GPL(geni_se_tx_init_dma);
/**
* geni_se_tx_dma_prep() - Prepare the serial engine for TX DMA transfer
@@ -712,7 +809,7 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
geni_se_tx_init_dma(se, *iova, len);
return 0;
}
-EXPORT_SYMBOL(geni_se_tx_dma_prep);
+EXPORT_SYMBOL_GPL(geni_se_tx_dma_prep);
/**
* geni_se_rx_init_dma() - Initiate RX DMA transfer on the serial engine
@@ -736,7 +833,7 @@ void geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len)
writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
writel(len, se->base + SE_DMA_RX_LEN);
}
-EXPORT_SYMBOL(geni_se_rx_init_dma);
+EXPORT_SYMBOL_GPL(geni_se_rx_init_dma);
/**
* geni_se_rx_dma_prep() - Prepare the serial engine for RX DMA transfer
@@ -764,7 +861,7 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
geni_se_rx_init_dma(se, *iova, len);
return 0;
}
-EXPORT_SYMBOL(geni_se_rx_dma_prep);
+EXPORT_SYMBOL_GPL(geni_se_rx_dma_prep);
/**
* geni_se_tx_dma_unprep() - Unprepare the serial engine after TX DMA transfer
@@ -781,7 +878,7 @@ void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
}
-EXPORT_SYMBOL(geni_se_tx_dma_unprep);
+EXPORT_SYMBOL_GPL(geni_se_tx_dma_unprep);
/**
* geni_se_rx_dma_unprep() - Unprepare the serial engine after RX DMA transfer
@@ -798,7 +895,7 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
}
-EXPORT_SYMBOL(geni_se_rx_dma_unprep);
+EXPORT_SYMBOL_GPL(geni_se_rx_dma_unprep);
int geni_icc_get(struct geni_se *se, const char *icc_ddr)
{
@@ -827,7 +924,7 @@ err:
return err;
}
-EXPORT_SYMBOL(geni_icc_get);
+EXPORT_SYMBOL_GPL(geni_icc_get);
int geni_icc_set_bw(struct geni_se *se)
{
@@ -845,7 +942,7 @@ int geni_icc_set_bw(struct geni_se *se)
return 0;
}
-EXPORT_SYMBOL(geni_icc_set_bw);
+EXPORT_SYMBOL_GPL(geni_icc_set_bw);
void geni_icc_set_tag(struct geni_se *se, u32 tag)
{
@@ -854,7 +951,7 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag)
for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++)
icc_set_tag(se->icc_paths[i].path, tag);
}
-EXPORT_SYMBOL(geni_icc_set_tag);
+EXPORT_SYMBOL_GPL(geni_icc_set_tag);
/* To do: Replace this by icc_bulk_enable once it's implemented in ICC core */
int geni_icc_enable(struct geni_se *se)
@@ -872,7 +969,7 @@ int geni_icc_enable(struct geni_se *se)
return 0;
}
-EXPORT_SYMBOL(geni_icc_enable);
+EXPORT_SYMBOL_GPL(geni_icc_enable);
int geni_icc_disable(struct geni_se *se)
{
@@ -889,12 +986,384 @@ int geni_icc_disable(struct geni_se *se)
return 0;
}
-EXPORT_SYMBOL(geni_icc_disable);
+EXPORT_SYMBOL_GPL(geni_icc_disable);
+
+/**
+ * geni_find_protocol_fw() - Locate and validate SE firmware for a protocol.
+ * @dev: Pointer to the device structure.
+ * @fw: Pointer to the firmware image.
+ * @protocol: Expected serial engine protocol type.
+ *
+ * Identifies the appropriate firmware image or configuration required for a
+ * specific communication protocol instance running on a Qualcomm GENI
+ * controller.
+ *
+ * Return: pointer to a valid 'struct se_fw_hdr' if found, or NULL otherwise.
+ */
+static struct se_fw_hdr *geni_find_protocol_fw(struct device *dev, const struct firmware *fw,
+ enum geni_se_protocol_type protocol)
+{
+ const struct elf32_hdr *ehdr;
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ struct se_fw_hdr *sefw;
+ u32 fw_end, cfg_idx_end, cfg_val_end;
+ u16 fw_size;
+ int i;
+
+ if (!fw || fw->size < sizeof(struct elf32_hdr))
+ return NULL;
+
+ ehdr = (const struct elf32_hdr *)fw->data;
+ phdrs = (const struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+
+ /*
+ * The firmware is expected to have at least two program headers (segments).
+ * One for metadata and the other for the actual protocol-specific firmware.
+ */
+ if (ehdr->e_phnum < 2) {
+ dev_err(dev, "Invalid firmware: less than 2 program headers\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (fw->size < phdr->p_offset + phdr->p_filesz) {
+ dev_err(dev, "Firmware size (%zu) < expected offset (%u) + size (%u)\n",
+ fw->size, phdr->p_offset, phdr->p_filesz);
+ return NULL;
+ }
+
+ if (phdr->p_type != PT_LOAD || !phdr->p_memsz)
+ continue;
+
+ if (MI_PBT_PAGE_MODE_VALUE(phdr->p_flags) != MI_PBT_NON_PAGED_SEGMENT ||
+ MI_PBT_SEGMENT_TYPE_VALUE(phdr->p_flags) == MI_PBT_HASH_SEGMENT ||
+ MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_NOTUSED_SEGMENT ||
+ MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_SHARED_SEGMENT)
+ continue;
+
+ if (phdr->p_filesz < sizeof(struct se_fw_hdr))
+ continue;
+
+ sefw = (struct se_fw_hdr *)(fw->data + phdr->p_offset);
+ fw_size = le16_to_cpu(sefw->fw_size_in_items);
+ fw_end = le16_to_cpu(sefw->fw_offset) + fw_size * sizeof(u32);
+ cfg_idx_end = le16_to_cpu(sefw->cfg_idx_offset) +
+ le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u8);
+ cfg_val_end = le16_to_cpu(sefw->cfg_val_offset) +
+ le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u32);
+
+ if (le32_to_cpu(sefw->magic) != SE_MAGIC_NUM || le32_to_cpu(sefw->version) != 1)
+ continue;
+
+		if (le16_to_cpu(sefw->serial_protocol) != protocol)
+ continue;
+
+ if (fw_size % 2 != 0) {
+ fw_size++;
+ sefw->fw_size_in_items = cpu_to_le16(fw_size);
+ }
+
+ if (fw_size >= MAX_GENI_CFG_RAMn_CNT) {
+ dev_err(dev,
+ "Firmware size (%u) exceeds max allowed RAMn count (%u)\n",
+ fw_size, MAX_GENI_CFG_RAMn_CNT);
+ continue;
+ }
+
+ if (fw_end > phdr->p_filesz || cfg_idx_end > phdr->p_filesz ||
+ cfg_val_end > phdr->p_filesz) {
+ dev_err(dev, "Truncated or corrupt SE FW segment found at index %d\n", i);
+ continue;
+ }
+
+ return sefw;
+ }
+
+ dev_err(dev, "Failed to get %s protocol firmware\n", protocol_name[protocol]);
+ return NULL;
+}
+
+/**
+ * geni_configure_xfer_mode() - Set the transfer mode.
+ * @se: Pointer to the concerned serial engine.
+ * @mode: SE data transfer mode.
+ *
+ * Set the transfer mode to either FIFO or DMA according to the mode specified
+ * by the protocol driver.
+ *
+ * Return: 0 if successful, otherwise return an error value.
+ */
+static int geni_configure_xfer_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
+{
+ /* Configure SE FIFO, DMA or GSI mode. */
+ switch (mode) {
+ case GENI_GPI_DMA:
+ geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(0x0, se->base + SE_IRQ_EN);
+ writel(DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN,
+ se->base + SE_GSI_EVENT_EN);
+ break;
+
+ case GENI_SE_FIFO:
+ geni_clrbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN,
+ se->base + SE_IRQ_EN);
+ writel(0x0, se->base + SE_GSI_EVENT_EN);
+ break;
+
+ case GENI_SE_DMA:
+ geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN,
+ se->base + SE_IRQ_EN);
+ writel(0x0, se->base + SE_GSI_EVENT_EN);
+ break;
+
+ default:
+ dev_err(se->dev, "Invalid geni-se transfer mode: %d\n", mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * geni_enable_interrupts() - Enable interrupts.
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Enable the required interrupts during the firmware load process.
+ */
+static void geni_enable_interrupts(struct geni_se *se)
+{
+ u32 val;
+
+ /* Enable required interrupts. */
+ writel(M_COMMON_GENI_M_IRQ_EN, se->base + SE_GENI_M_IRQ_EN);
+
+ val = S_CMD_OVERRUN_EN | S_ILLEGAL_CMD_EN | S_CMD_CANCEL_EN | S_CMD_ABORT_EN |
+ S_GP_IRQ_0_EN | S_GP_IRQ_1_EN | S_GP_IRQ_2_EN | S_GP_IRQ_3_EN |
+ S_RX_FIFO_WR_ERR_EN | S_RX_FIFO_RD_ERR_EN;
+ writel(val, se->base + SE_GENI_S_IRQ_ENABLE);
+
+ /* DMA mode configuration. */
+ val = GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN | GENI_SE_DMA_DONE_EN;
+ writel(val, se->base + SE_DMA_TX_IRQ_EN_SET);
+ val = GENI_SE_DMA_FLUSH_DONE | GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN |
+ GENI_SE_DMA_DONE_EN;
+ writel(val, se->base + SE_DMA_RX_IRQ_EN_SET);
+}
+
+/**
+ * geni_write_fw_revision() - Write the firmware revision.
+ * @se: Pointer to the concerned serial engine.
+ * @serial_protocol: serial protocol type.
+ * @fw_version: QUP firmware version.
+ *
+ * Write the firmware revision and protocol into the respective register.
+ */
+static void geni_write_fw_revision(struct geni_se *se, u16 serial_protocol, u16 fw_version)
+{
+ u32 reg;
+
+ reg = FIELD_PREP(FW_REV_PROTOCOL_MSK, serial_protocol);
+ reg |= FIELD_PREP(FW_REV_VERSION_MSK, fw_version);
+
+ writel(reg, se->base + SE_GENI_FW_REVISION);
+ writel(reg, se->base + SE_GENI_S_FW_REVISION);
+}
+
+/**
+ * geni_load_se_fw() - Load Serial Engine specific firmware.
+ * @se: Pointer to the concerned serial engine.
+ * @fw: Pointer to the firmware structure.
+ * @mode: SE data transfer mode.
+ * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C).
+ *
+ * Load the protocol firmware into the IRAM of the Serial Engine.
+ *
+ * Return: 0 if successful, otherwise return an error value.
+ */
+static int geni_load_se_fw(struct geni_se *se, const struct firmware *fw,
+ enum geni_se_xfer_mode mode, enum geni_se_protocol_type protocol)
+{
+ const u32 *fw_data, *cfg_val_arr;
+ const u8 *cfg_idx_arr;
+ u32 i, reg_value;
+ int ret;
+ struct se_fw_hdr *hdr;
+
+ hdr = geni_find_protocol_fw(se->dev, fw, protocol);
+ if (!hdr)
+ return -EINVAL;
+
+ fw_data = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->fw_offset));
+ cfg_idx_arr = (const u8 *)hdr + le16_to_cpu(hdr->cfg_idx_offset);
+ cfg_val_arr = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->cfg_val_offset));
+
+ ret = geni_icc_set_bw(se);
+ if (ret)
+ return ret;
+
+ ret = geni_icc_enable(se);
+ if (ret)
+ return ret;
+
+ ret = geni_se_resources_on(se);
+ if (ret)
+ goto out_icc_disable;
+
+ /*
+ * Disable high-priority interrupts until all currently executing
+ * low-priority interrupts have been fully handled.
+ */
+ geni_setbits32(se->wrapper->base + QUPV3_COMMON_CFG, FAST_SWITCH_TO_HIGH_DISABLE);
+
+ /* Set AHB_M_CLK_CGC_ON to indicate hardware controls se-wrapper cgc clock. */
+ geni_setbits32(se->wrapper->base + QUPV3_SE_AHB_M_CFG, AHB_M_CLK_CGC_ON);
+
+	/* Let the hardware control the common CGC. */
+ geni_setbits32(se->wrapper->base + QUPV3_COMMON_CGC_CTRL, COMMON_CSR_SLV_CLK_CGC_ON);
+
+	/*
+	 * Clear GENI_OUTPUT_CTRL so that no output lines are driven while
+	 * the firmware is being programmed; the required lines are enabled
+	 * again once configuration completes.
+	 */
+ writel(0x0, se->base + GENI_OUTPUT_CTRL);
+
+ /* Set SCLK and HCLK to program RAM */
+ geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+ writel(0x0, se->base + SE_GENI_CLK_CTRL);
+ geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+
+ /* Enable required clocks for DMA CSR, TX and RX. */
+ reg_value = AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON |
+ DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
+ geni_setbits32(se->base + SE_DMA_GENERAL_CFG, reg_value);
+
+ /* Let hardware control CGC by default. */
+ writel(DEFAULT_CGC_EN, se->base + SE_GENI_CGC_CTRL);
+
+	/* Record the firmware's configuration revision in the INIT_CFG_REVISION registers. */
+ writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_INIT_CFG_REVISION);
+ writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_S_INIT_CFG_REVISION);
+
+ /* Configure GENI primitive table. */
+ for (i = 0; i < le16_to_cpu(hdr->cfg_size_in_items); i++)
+ writel(cfg_val_arr[i],
+ se->base + SE_GENI_CFG_REG0 + (cfg_idx_arr[i] * sizeof(u32)));
+
+	/* Configure the threshold that asserts the RX_RFR_WATERMARK condition. */
+ reg_value = geni_se_get_rx_fifo_depth(se);
+ writel(reg_value - 2, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
+
+	/* Enable the default IO output control lines. */
+ geni_setbits32(se->base + GENI_OUTPUT_CTRL, DEFAULT_IO_OUTPUT_CTRL_MSK);
+
+ ret = geni_configure_xfer_mode(se, mode);
+ if (ret)
+ goto out_resources_off;
+
+ geni_enable_interrupts(se);
+
+ geni_write_fw_revision(se, le16_to_cpu(hdr->serial_protocol), le16_to_cpu(hdr->fw_version));
+
+ /* Program RAM address space. */
+ memcpy_toio(se->base + SE_GENI_CFG_RAMN, fw_data,
+ le16_to_cpu(hdr->fw_size_in_items) * sizeof(u32));
+
+ /* Put default values on GENI's output pads. */
+ writel_relaxed(0x1, se->base + GENI_FORCE_DEFAULT_REG);
+
+ /* Toggle SCLK/HCLK from high to low to finalize RAM programming and apply config. */
+ geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+ geni_setbits32(se->base + SE_GENI_CLK_CTRL, SER_CLK_SEL);
+ geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+
+	/* Enable the serial engine DMA interface. */
+ geni_setbits32(se->base + SE_DMA_IF_EN, DMA_IF_EN);
+
+ /* Enable or disable FIFO interface of the serial engine. */
+ if (mode == GENI_SE_FIFO)
+ geni_clrbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE);
+ else
+ geni_setbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE);
+
+out_resources_off:
+ geni_se_resources_off(se);
+
+out_icc_disable:
+ geni_icc_disable(se);
+ return ret;
+}
+
+/**
+ * geni_load_se_firmware() - Load firmware for SE based on protocol
+ * @se: Pointer to the concerned serial engine.
+ * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C).
+ *
+ * Retrieves the firmware name from device properties and sets the transfer mode
+ * (FIFO or GSI DMA) based on device tree configuration. Enforces FIFO mode for
+ * UART protocol due to lack of GSI DMA support. Requests the firmware and loads
+ * it into the SE.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol)
+{
+ const char *fw_name;
+ const struct firmware *fw;
+ enum geni_se_xfer_mode mode = GENI_SE_FIFO;
+ int ret;
+
+ if (protocol >= ARRAY_SIZE(protocol_name)) {
+		dev_err(se->dev, "Invalid geni-se protocol: %d\n", protocol);
+ return -EINVAL;
+ }
+
+ ret = device_property_read_string(se->wrapper->dev, "firmware-name", &fw_name);
+ if (ret) {
+ dev_err(se->dev, "Failed to read firmware-name property: %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(se->dev->of_node, "qcom,enable-gsi-dma"))
+ mode = GENI_GPI_DMA;
+
+	/* GSI mode is not supported by the UART driver, so force FIFO mode. */
+ if (protocol == GENI_SE_UART)
+ mode = GENI_SE_FIFO;
+
+ ret = request_firmware(&fw, fw_name, se->dev);
+ if (ret) {
+ if (ret == -ENOENT)
+ return -EPROBE_DEFER;
+
+ dev_err(se->dev, "Failed to request firmware '%s' for protocol %d: ret: %d\n",
+ fw_name, protocol, ret);
+ return ret;
+ }
+
+ ret = geni_load_se_fw(se, fw, mode, protocol);
+ release_firmware(fw);
+
+ if (ret) {
+ dev_err(se->dev, "Failed to load SE firmware for protocol %d: ret: %d\n",
+ protocol, ret);
+ return ret;
+ }
+
+ dev_dbg(se->dev, "Firmware load for %s protocol is successful for xfer mode: %d\n",
+ protocol_name[protocol], mode);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(geni_load_se_firmware);
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct geni_wrapper *wrapper;
+ const struct geni_se_desc *desc;
int ret;
wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
@@ -906,13 +1375,10 @@ static int geni_se_probe(struct platform_device *pdev)
if (IS_ERR(wrapper->base))
return PTR_ERR(wrapper->base);
- if (!has_acpi_companion(&pdev->dev)) {
- const struct geni_se_desc *desc;
- int i;
+ desc = device_get_match_data(&pdev->dev);
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
+ if (!has_acpi_companion(&pdev->dev) && desc->num_clks) {
+ int i;
wrapper->num_clks = min_t(unsigned int, desc->num_clks, MAX_CLKS);
@@ -953,6 +1419,8 @@ static const struct geni_se_desc qup_desc = {
.num_clks = ARRAY_SIZE(qup_clks),
};
+static const struct geni_se_desc sa8255p_qup_desc = {};
+
static const char * const i2c_master_hub_clks[] = {
"s-ahb",
};
@@ -965,6 +1433,7 @@ static const struct geni_se_desc i2c_master_hub_desc = {
static const struct of_device_id geni_se_dt_match[] = {
{ .compatible = "qcom,geni-se-qup", .data = &qup_desc },
{ .compatible = "qcom,geni-se-i2c-master-hub", .data = &i2c_master_hub_desc },
+ { .compatible = "qcom,sa8255p-geni-se-qup", .data = &sa8255p_qup_desc },
{}
};
MODULE_DEVICE_TABLE(of, geni_se_dt_match);
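A sketch of how a GENI protocol driver could call the new loader from its probe path (the wrapper function is hypothetical; geni_load_se_firmware() and GENI_SE_UART come from this patch and <linux/soc/qcom/geni-se.h>):

#include <linux/soc/qcom/geni-se.h>

/*
 * 'se' is the protocol driver's struct geni_se, already populated with
 * dev, base and wrapper as in the existing GENI SE drivers.
 */
static int demo_uart_load_fw(struct geni_se *se)
{
	/*
	 * Returns -EPROBE_DEFER while the firmware file named by the
	 * wrapper's "firmware-name" property is not yet available.
	 */
	return geni_load_se_firmware(se, GENI_SE_UART);
}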
diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c
new file mode 100644
index 000000000000..06b4a596e275
--- /dev/null
+++ b/drivers/soc/qcom/qcom-pbs.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/soc/qcom/qcom-pbs.h>
+
+#define PBS_CLIENT_TRIG_CTL 0x42
+#define PBS_CLIENT_SW_TRIG_BIT BIT(7)
+#define PBS_CLIENT_SCRATCH1 0x50
+#define PBS_CLIENT_SCRATCH2 0x51
+#define PBS_CLIENT_SCRATCH2_ERROR 0xFF
+
+#define RETRIES 2000
+#define DELAY 1100
+
+struct pbs_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct device_link *link;
+
+ u32 base;
+};
+
+static int qcom_pbs_wait_for_ack(struct pbs_dev *pbs, u8 bit_pos)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2,
+ val, val & BIT(bit_pos), DELAY, DELAY * RETRIES);
+
+ if (ret < 0) {
+ dev_err(pbs->dev, "Timeout for PBS ACK/NACK for bit %u\n", bit_pos);
+ return -ETIMEDOUT;
+ }
+
+ if (val == PBS_CLIENT_SCRATCH2_ERROR) {
+ ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0);
+ dev_err(pbs->dev, "NACK from PBS for bit %u\n", bit_pos);
+ return -EINVAL;
+ }
+
+ dev_dbg(pbs->dev, "PBS sequence for bit %u executed!\n", bit_pos);
+ return 0;
+}
+
+/**
+ * qcom_pbs_trigger_event() - Trigger the PBS RAM sequence
+ * @pbs: Pointer to PBS device
+ * @bitmap: Bitmap of the PBS sequence bits to trigger
+ *
+ * This function is used to trigger the PBS RAM sequence to be
+ * executed by the client driver.
+ *
+ * The PBS trigger sequence involves
+ * 1. setting the PBS sequence bit in PBS_CLIENT_SCRATCH1
+ * 2. Initiating the SW PBS trigger
+ * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for the
+ * completion of the sequence.
+ * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute
+ *
+ * Return: 0 on success, < 0 on failure
+ */
+int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap)
+{
+ unsigned int val;
+ u16 bit_pos;
+ int ret;
+
+ if (WARN_ON(!bitmap))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(pbs))
+ return -EINVAL;
+
+ guard(mutex)(&pbs->lock);
+ ret = regmap_read(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val == PBS_CLIENT_SCRATCH2_ERROR) {
+ /* PBS error - clear SCRATCH2 register */
+ ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (bit_pos = 0; bit_pos < 8; bit_pos++) {
+ if (!(bitmap & BIT(bit_pos)))
+ continue;
+
+ /* Clear the PBS sequence bit position */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2,
+ BIT(bit_pos), 0);
+ if (ret < 0)
+ break;
+
+ /* Set the PBS sequence bit position */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1,
+ BIT(bit_pos), BIT(bit_pos));
+ if (ret < 0)
+ break;
+
+ /* Initiate the SW trigger */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_TRIG_CTL,
+ PBS_CLIENT_SW_TRIG_BIT, PBS_CLIENT_SW_TRIG_BIT);
+ if (ret < 0)
+ break;
+
+ ret = qcom_pbs_wait_for_ack(pbs, bit_pos);
+ if (ret < 0)
+ break;
+
+ /* Clear the PBS sequence bit position */
+ regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0);
+ regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ }
+
+	/* Clear all the requested bits in PBS_CLIENT_SCRATCH1 */
+ return regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, bitmap, 0);
+}
+EXPORT_SYMBOL_GPL(qcom_pbs_trigger_event);
+
+/**
+ * get_pbs_client_device() - Get the PBS device used by client
+ * @dev: Client device
+ *
+ * This function is used to get the PBS device that is being
+ * used by the client.
+ *
+ * Return: pbs_dev on success, ERR_PTR on failure
+ */
+struct pbs_dev *get_pbs_client_device(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct pbs_dev *pbs;
+
+ struct device_node *pbs_dev_node __free(device_node) = of_parse_phandle(dev->of_node,
+ "qcom,pbs", 0);
+ if (!pbs_dev_node) {
+ dev_err(dev, "Missing qcom,pbs property\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(pbs_dev_node);
+ if (!pdev) {
+ dev_err(dev, "Unable to find PBS dev_node\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ pbs = platform_get_drvdata(pdev);
+ if (!pbs) {
+ dev_err(dev, "Cannot get pbs instance from %s\n", dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ pbs->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!pbs->link) {
+ dev_err(&pdev->dev, "Failed to create device link to consumer %s\n", dev_name(dev));
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ platform_device_put(pdev);
+
+ return pbs;
+}
+EXPORT_SYMBOL_GPL(get_pbs_client_device);
+
+static int qcom_pbs_probe(struct platform_device *pdev)
+{
+ struct pbs_dev *pbs;
+ u32 val;
+ int ret;
+
+ pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL);
+ if (!pbs)
+ return -ENOMEM;
+
+ pbs->dev = &pdev->dev;
+ pbs->regmap = dev_get_regmap(pbs->dev->parent, NULL);
+ if (!pbs->regmap) {
+ dev_err(pbs->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32(pbs->dev, "reg", &val);
+ if (ret < 0) {
+ dev_err(pbs->dev, "Couldn't find reg, ret = %d\n", ret);
+ return ret;
+ }
+ pbs->base = val;
+ mutex_init(&pbs->lock);
+
+ platform_set_drvdata(pdev, pbs);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pbs_match_table[] = {
+ { .compatible = "qcom,pbs" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_pbs_match_table);
+
+static struct platform_driver qcom_pbs_driver = {
+ .driver = {
+ .name = "qcom-pbs",
+ .of_match_table = qcom_pbs_match_table,
+ },
+ .probe = qcom_pbs_probe,
+};
+module_platform_driver(qcom_pbs_driver);
+
+MODULE_DESCRIPTION("QCOM PBS DRIVER");
+MODULE_LICENSE("GPL");
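A minimal PBS client sketch built on the two exported helpers above (demo_pbs_kick() and the choice of BIT(0) are hypothetical; the caller's DT node is assumed to carry the qcom,pbs phandle checked by get_pbs_client_device()):

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/qcom-pbs.h>

static int demo_pbs_kick(struct device *dev)
{
	struct pbs_dev *pbs;

	pbs = get_pbs_client_device(dev);
	if (IS_ERR(pbs))
		return PTR_ERR(pbs);	/* may be -EPROBE_DEFER */

	/* Trigger the PBS RAM sequence wired to bit 0 and wait for its ack. */
	return qcom_pbs_trigger_event(pbs, BIT(0));
}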
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 77f0cf126629..a543ab9bee6c 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, Linaro Ltd
*/
#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
@@ -11,8 +12,12 @@
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/soc/qcom/qcom_aoss.h>
+#define CREATE_TRACE_POINTS
+#include "trace-aoss.h"
+
#define QMP_DESC_MAGIC 0x0
#define QMP_DESC_VERSION 0x4
#define QMP_DESC_FEATURES 0x8
@@ -44,6 +49,8 @@
#define QMP_NUM_COOLING_RESOURCES 2
+#define QMP_DEBUGFS_FILES 4
+
static bool qmp_cdev_max_state = 1;
struct qmp_cooling_device {
@@ -65,6 +72,8 @@ struct qmp_cooling_device {
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
* @cooling_devs: thermal cooling devices
+ * @debugfs_root: directory for the developer/tester interface
+ * @debugfs_files: array of individual debugfs entries under debugfs_root
*/
struct qmp {
void __iomem *msgram;
@@ -82,6 +91,8 @@ struct qmp {
struct clk_hw qdss_clk;
struct qmp_cooling_device *cooling_devs;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_files[QMP_DEBUGFS_FILES];
};
static void qmp_kick(struct qmp *qmp)
@@ -214,7 +225,7 @@ static bool qmp_message_empty(struct qmp *qmp)
*
* Return: 0 on success, negative errno on failure
*/
-int qmp_send(struct qmp *qmp, const char *fmt, ...)
+int __printf(2, 3) qmp_send(struct qmp *qmp, const char *fmt, ...)
{
char buf[QMP_MSG_LEN];
long time_left;
@@ -235,6 +246,8 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...)
mutex_lock(&qmp->tx_lock);
+ trace_aoss_send(buf);
+
/* The message RAM only implements 32-bit accesses */
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
buf, sizeof(buf) / sizeof(u32));
@@ -256,11 +269,13 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...)
ret = 0;
}
+ trace_aoss_send_done(buf, ret);
+
mutex_unlock(&qmp->tx_lock);
return ret;
}
-EXPORT_SYMBOL(qmp_send);
+EXPORT_SYMBOL_GPL(qmp_send);
static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
@@ -344,7 +359,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
- qmp_cdev->name, cdev_state ? "on" : "off");
+ qmp_cdev->name, str_on_off(cdev_state));
if (!ret)
qmp_cdev->state = cdev_state;
@@ -380,7 +395,7 @@ static int qmp_cooling_device_add(struct qmp *qmp,
static int qmp_cooling_devices_register(struct qmp *qmp)
{
- struct device_node *np, *child;
+ struct device_node *np;
int count = 0;
int ret;
@@ -393,15 +408,13 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
if (!qmp->cooling_devs)
return -ENOMEM;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
if (!of_property_present(child, "#cooling-cells"))
continue;
ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
child);
- if (ret) {
- of_node_put(child);
+ if (ret)
goto unroll;
- }
}
if (!count)
@@ -458,7 +471,7 @@ struct qmp *qmp_get(struct device *dev)
}
return qmp;
}
-EXPORT_SYMBOL(qmp_get);
+EXPORT_SYMBOL_GPL(qmp_get);
/**
* qmp_put() - release a qmp handle
@@ -473,7 +486,92 @@ void qmp_put(struct qmp *qmp)
if (!IS_ERR_OR_NULL(qmp))
put_device(qmp->dev);
}
-EXPORT_SYMBOL(qmp_put);
+EXPORT_SYMBOL_GPL(qmp_put);
+
+struct qmp_debugfs_entry {
+ const char *name;
+ const char *fmt;
+ bool is_bool;
+ const char *true_val;
+ const char *false_val;
+};
+
+static const struct qmp_debugfs_entry qmp_debugfs_entries[QMP_DEBUGFS_FILES] = {
+ { "ddr_frequency_mhz", "{class: ddr, res: fixed, val: %u}", false },
+ { "prevent_aoss_sleep", "{class: aoss_slp, res: sleep: %s}", true, "enable", "disable" },
+ { "prevent_cx_collapse", "{class: cx_mol, res: cx, val: %s}", true, "mol", "off" },
+ { "prevent_ddr_collapse", "{class: ddr_mol, res: ddr, val: %s}", true, "mol", "off" },
+};
+
+static ssize_t qmp_debugfs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *pos)
+{
+ const struct qmp_debugfs_entry *entry = NULL;
+ struct qmp *qmp = file->private_data;
+ char buf[QMP_MSG_LEN];
+ unsigned int uint_val;
+ const char *str_val;
+ bool bool_val;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
+ if (qmp->debugfs_files[i] == file->f_path.dentry) {
+ entry = &qmp_debugfs_entries[i];
+ break;
+ }
+ }
+ if (WARN_ON(!entry))
+ return -EFAULT;
+
+ if (entry->is_bool) {
+ ret = kstrtobool_from_user(user_buf, count, &bool_val);
+ if (ret)
+ return ret;
+
+ str_val = bool_val ? entry->true_val : entry->false_val;
+
+ ret = snprintf(buf, sizeof(buf), entry->fmt, str_val);
+ if (ret >= sizeof(buf))
+ return -EINVAL;
+ } else {
+ ret = kstrtou32_from_user(user_buf, count, 0, &uint_val);
+ if (ret)
+ return ret;
+
+ ret = snprintf(buf, sizeof(buf), entry->fmt, uint_val);
+ if (ret >= sizeof(buf))
+ return -EINVAL;
+ }
+
+ ret = qmp_send(qmp, buf);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations qmp_debugfs_fops = {
+ .open = simple_open,
+ .write = qmp_debugfs_write,
+};
+
+static void qmp_debugfs_create(struct qmp *qmp)
+{
+ const struct qmp_debugfs_entry *entry;
+ int i;
+
+ qmp->debugfs_root = debugfs_create_dir("qcom_aoss", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
+ entry = &qmp_debugfs_entries[i];
+
+ qmp->debugfs_files[i] = debugfs_create_file(entry->name, 0200,
+ qmp->debugfs_root,
+ qmp,
+ &qmp_debugfs_fops);
+ }
+}
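/*
 * Worked example (illustrative only): per the qmp_debugfs_entries table above,
 * writing "1" to prevent_cx_collapse takes the boolean path in
 * qmp_debugfs_write() and substitutes the entry's true_val, while writing "0"
 * substitutes "off"; ddr_frequency_mhz instead parses its input as a u32.
 */
static void example_format_cx_request(char *buf, size_t len)
{
	/* Produces "{class: cx_mol, res: cx, val: mol}" for qmp_send() */
	snprintf(buf, len, "{class: cx_mol, res: cx, val: %s}", "mol");
}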
static int qmp_probe(struct platform_device *pdev)
{
@@ -523,6 +621,8 @@ static int qmp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qmp);
+ qmp_debugfs_create(qmp);
+
return 0;
err_close_qmp:
@@ -533,17 +633,17 @@ err_free_mbox:
return ret;
}
-static int qmp_remove(struct platform_device *pdev)
+static void qmp_remove(struct platform_device *pdev)
{
struct qmp *qmp = platform_get_drvdata(pdev);
+ debugfs_remove_recursive(qmp->debugfs_root);
+
qmp_qdss_clk_remove(qmp);
qmp_cooling_devices_remove(qmp);
qmp_close(qmp);
mbox_free_channel(qmp->mbox_chan);
-
- return 0;
}
static const struct of_device_id qmp_dt_match[] = {
@@ -565,7 +665,7 @@ static struct platform_driver qmp_driver = {
.suppress_bind_attrs = true,
},
.probe = qmp_probe,
- .remove = qmp_remove,
+ .remove = qmp_remove,
};
module_platform_driver(qmp_driver);
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index df7907a83aa8..a25d1de592f0 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -212,15 +212,6 @@ static int gsbi_probe(struct platform_device *pdev)
return of_platform_populate(node, NULL, NULL, &pdev->dev);
}
-static int gsbi_remove(struct platform_device *pdev)
-{
- struct gsbi_info *gsbi = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(gsbi->hclk);
-
- return 0;
-}
-
static const struct of_device_id gsbi_dt_match[] = {
{ .compatible = "qcom,gsbi-v1.0.0", },
{ },
@@ -234,7 +225,6 @@ static struct platform_driver gsbi_driver = {
.of_match_table = gsbi_dt_match,
},
.probe = gsbi_probe,
- .remove = gsbi_remove,
};
module_platform_driver(gsbi_driver);
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
new file mode 100644
index 000000000000..1bcbe69688d2
--- /dev/null
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Protection Domain mapper
+ *
+ * Copyright (c) 2023 Linaro Ltd.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "pdr_internal.h"
+
+#define SERVREG_QMI_VERSION 0x101
+#define SERVREG_QMI_INSTANCE 0
+
+#define TMS_SERVREG_SERVICE "tms/servreg"
+
+struct qcom_pdm_domain_data {
+ const char *domain;
+ u32 instance_id;
+ /* NULL-terminated array */
+ const char * services[];
+};
+
+struct qcom_pdm_domain {
+ struct list_head list;
+ const char *name;
+ u32 instance_id;
+};
+
+struct qcom_pdm_service {
+ struct list_head list;
+ struct list_head domains;
+ const char *name;
+};
+
+struct qcom_pdm_data {
+ refcount_t refcnt;
+ struct qmi_handle handle;
+ struct list_head services;
+};
+
+static DEFINE_MUTEX(qcom_pdm_mutex); /* protects __qcom_pdm_data */
+static struct qcom_pdm_data *__qcom_pdm_data;
+
+static struct qcom_pdm_service *qcom_pdm_find(struct qcom_pdm_data *data,
+ const char *name)
+{
+ struct qcom_pdm_service *service;
+
+ list_for_each_entry(service, &data->services, list) {
+ if (!strcmp(service->name, name))
+ return service;
+ }
+
+ return NULL;
+}
+
+static int qcom_pdm_add_service_domain(struct qcom_pdm_data *data,
+ const char *service_name,
+ const char *domain_name,
+ u32 instance_id)
+{
+ struct qcom_pdm_service *service;
+ struct qcom_pdm_domain *domain;
+
+ service = qcom_pdm_find(data, service_name);
+ if (service) {
+ list_for_each_entry(domain, &service->domains, list) {
+ if (!strcmp(domain->name, domain_name))
+ return -EBUSY;
+ }
+ } else {
+ service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (!service)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&service->domains);
+ service->name = service_name;
+
+ list_add_tail(&service->list, &data->services);
+ }
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain) {
+ if (list_empty(&service->domains)) {
+ list_del(&service->list);
+ kfree(service);
+ }
+
+ return -ENOMEM;
+ }
+
+ domain->name = domain_name;
+ domain->instance_id = instance_id;
+ list_add_tail(&domain->list, &service->domains);
+
+ return 0;
+}
+
+static int qcom_pdm_add_domain(struct qcom_pdm_data *data,
+ const struct qcom_pdm_domain_data *domain)
+{
+ int ret;
+ int i;
+
+ ret = qcom_pdm_add_service_domain(data,
+ TMS_SERVREG_SERVICE,
+ domain->domain,
+ domain->instance_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; domain->services[i]; i++) {
+ ret = qcom_pdm_add_service_domain(data,
+ domain->services[i],
+ domain->domain,
+ domain->instance_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
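/*
 * Illustrative expansion (not part of this patch): passing the adsp_audio_pd
 * entry defined further below through qcom_pdm_add_domain() registers two
 * service records that point at the same protection domain:
 *
 *   "tms/servreg" -> { "msm/adsp/audio_pd", instance 74 }
 *   "avs/audio"   -> { "msm/adsp/audio_pd", instance 74 }
 *
 * so a later SERVREG_GET_DOMAIN_LIST_REQ for either service name resolves to
 * the ADSP audio protection domain.
 */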
+
+static void qcom_pdm_free_domains(struct qcom_pdm_data *data)
+{
+ struct qcom_pdm_service *service, *tservice;
+ struct qcom_pdm_domain *domain, *tdomain;
+
+ list_for_each_entry_safe(service, tservice, &data->services, list) {
+ list_for_each_entry_safe(domain, tdomain, &service->domains, list) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+
+ list_del(&service->list);
+ kfree(service);
+ }
+}
+
+static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct qcom_pdm_data *data = container_of(qmi, struct qcom_pdm_data, handle);
+ const struct servreg_get_domain_list_req *req = decoded;
+ struct servreg_get_domain_list_resp *rsp;
+ struct qcom_pdm_service *service;
+ u32 offset;
+ int ret;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return;
+
+ offset = req->domain_offset_valid ? req->domain_offset : 0;
+
+ rsp->resp.result = QMI_RESULT_SUCCESS_V01;
+ rsp->resp.error = QMI_ERR_NONE_V01;
+
+ rsp->db_rev_count_valid = true;
+ rsp->db_rev_count = 1;
+
+ rsp->total_domains_valid = true;
+ rsp->total_domains = 0;
+
+ mutex_lock(&qcom_pdm_mutex);
+
+ service = qcom_pdm_find(data, req->service_name);
+ if (service) {
+ struct qcom_pdm_domain *domain;
+
+ rsp->domain_list_valid = true;
+ rsp->domain_list_len = 0;
+
+ list_for_each_entry(domain, &service->domains, list) {
+ u32 i = rsp->total_domains++;
+
+ if (i >= offset && i < SERVREG_DOMAIN_LIST_LENGTH) {
+ u32 j = rsp->domain_list_len++;
+
+ strscpy(rsp->domain_list[j].name, domain->name,
+ sizeof(rsp->domain_list[i].name));
+ rsp->domain_list[j].instance = domain->instance_id;
+
+ pr_debug("PDM: found %s / %d\n", domain->name,
+ domain->instance_id);
+ }
+ }
+ }
+
+ pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->service_name,
+ req->domain_offset_valid ? req->domain_offset : -1, rsp->domain_list_len, rsp->total_domains);
+
+ ret = qmi_send_response(qmi, sq, txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+ servreg_get_domain_list_resp_ei, rsp);
+ if (ret)
+ pr_err("Error sending servreg response: %d\n", ret);
+
+ mutex_unlock(&qcom_pdm_mutex);
+
+ kfree(rsp);
+}
+
+static void qcom_pdm_pfr(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ const struct servreg_loc_pfr_req *req = decoded;
+ struct servreg_loc_pfr_resp rsp = {};
+ int ret;
+
+ pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR_REQ,
+ SERVREG_LOC_PFR_RESP_MAX_LEN,
+ servreg_loc_pfr_resp_ei, &rsp);
+ if (ret)
+ pr_err("Error sending servreg response: %d\n", ret);
+}
+
+static const struct qmi_msg_handler qcom_pdm_msg_handlers[] = {
+ {
+ .type = QMI_REQUEST,
+ .msg_id = SERVREG_GET_DOMAIN_LIST_REQ,
+ .ei = servreg_get_domain_list_req_ei,
+ .decoded_size = sizeof(struct servreg_get_domain_list_req),
+ .fn = qcom_pdm_get_domain_list,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = SERVREG_LOC_PFR_REQ,
+ .ei = servreg_loc_pfr_req_ei,
+ .decoded_size = sizeof(struct servreg_loc_pfr_req),
+ .fn = qcom_pdm_pfr,
+ },
+ { },
+};
+
+static const struct qcom_pdm_domain_data adsp_audio_pd = {
+ .domain = "msm/adsp/audio_pd",
+ .instance_id = 74,
+ .services = {
+ "avs/audio",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data adsp_charger_pd = {
+ .domain = "msm/adsp/charger_pd",
+ .instance_id = 74,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data adsp_root_pd = {
+ .domain = "msm/adsp/root_pd",
+ .instance_id = 74,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data adsp_root_pd_pdr = {
+ .domain = "msm/adsp/root_pd",
+ .instance_id = 74,
+ .services = {
+ "tms/pdr_enabled",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data adsp_sensor_pd = {
+ .domain = "msm/adsp/sensor_pd",
+ .instance_id = 74,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data msm8996_adsp_audio_pd = {
+ .domain = "msm/adsp/audio_pd",
+ .instance_id = 4,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data msm8996_adsp_root_pd = {
+ .domain = "msm/adsp/root_pd",
+ .instance_id = 4,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data cdsp_root_pd = {
+ .domain = "msm/cdsp/root_pd",
+ .instance_id = 76,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data slpi_root_pd = {
+ .domain = "msm/slpi/root_pd",
+ .instance_id = 90,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data slpi_sensor_pd = {
+ .domain = "msm/slpi/sensor_pd",
+ .instance_id = 90,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data mpss_root_pd = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 180,
+ .services = {
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data mpss_root_pd_gps = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 180,
+ .services = {
+ "gps/gps_service",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data mpss_root_pd_gps_pdr = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 180,
+ .services = {
+ "gps/gps_service",
+ "tms/pdr_enabled",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data msm8996_mpss_root_pd = {
+ .domain = "msm/modem/root_pd",
+ .instance_id = 100,
+ .services = { NULL },
+};
+
+static const struct qcom_pdm_domain_data mpss_wlan_pd = {
+ .domain = "msm/modem/wlan_pd",
+ .instance_id = 180,
+ .services = {
+ "kernel/elf_loader",
+ "wlan/fw",
+ NULL,
+ },
+};
+
+static const struct qcom_pdm_domain_data *kaanapali_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *msm8996_domains[] = {
+ &msm8996_adsp_audio_pd,
+ &msm8996_adsp_root_pd,
+ &msm8996_mpss_root_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *msm8998_domains[] = {
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *qcm2290_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *qcs404_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc7180_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_sensor_pd,
+ &mpss_root_pd_gps_pdr,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc7280_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_charger_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps_pdr,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc8180x_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_charger_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sc8280xp_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_charger_pd,
+ &cdsp_root_pd,
+ NULL,
+};
+
+/* Unlike SDM660, SDM630/636 lack CDSP */
+static const struct qcom_pdm_domain_data *sdm630_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sdm660_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sdm670_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sdm845_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ &slpi_root_pd,
+ &slpi_sensor_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm6115_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm6350_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm7150_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8150_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8250_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &cdsp_root_pd,
+ &slpi_root_pd,
+ &slpi_sensor_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8350_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd_pdr,
+ &adsp_charger_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &slpi_root_pd,
+ &slpi_sensor_pd,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *sm8550_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_charger_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ NULL,
+};
+
+static const struct qcom_pdm_domain_data *x1e80100_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_charger_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ NULL,
+};
+
+static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
+ { .compatible = "qcom,apq8016", .data = NULL, },
+ { .compatible = "qcom,apq8064", .data = NULL, },
+ { .compatible = "qcom,apq8074", .data = NULL, },
+ { .compatible = "qcom,apq8084", .data = NULL, },
+ { .compatible = "qcom,apq8096", .data = msm8996_domains, },
+ { .compatible = "qcom,kaanapali", .data = kaanapali_domains, },
+ { .compatible = "qcom,msm8226", .data = NULL, },
+ { .compatible = "qcom,msm8909", .data = NULL, },
+ { .compatible = "qcom,msm8916", .data = NULL, },
+ { .compatible = "qcom,msm8939", .data = NULL, },
+ { .compatible = "qcom,msm8974", .data = NULL, },
+ { .compatible = "qcom,msm8996", .data = msm8996_domains, },
+ { .compatible = "qcom,msm8998", .data = msm8998_domains, },
+ { .compatible = "qcom,qcm2290", .data = qcm2290_domains, },
+ { .compatible = "qcom,qcm6490", .data = sc7280_domains, },
+ { .compatible = "qcom,qcs404", .data = qcs404_domains, },
+ { .compatible = "qcom,sc7180", .data = sc7180_domains, },
+ { .compatible = "qcom,sc7280", .data = sc7280_domains, },
+ { .compatible = "qcom,sc8180x", .data = sc8180x_domains, },
+ { .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, },
+ { .compatible = "qcom,sdm630", .data = sdm630_domains, },
+ { .compatible = "qcom,sdm636", .data = sdm630_domains, },
+ { .compatible = "qcom,sda660", .data = sdm660_domains, },
+ { .compatible = "qcom,sdm660", .data = sdm660_domains, },
+ { .compatible = "qcom,sdm670", .data = sdm670_domains, },
+ { .compatible = "qcom,sdm845", .data = sdm845_domains, },
+ { .compatible = "qcom,sm4250", .data = sm6115_domains, },
+ { .compatible = "qcom,sm6115", .data = sm6115_domains, },
+ { .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7150", .data = sm7150_domains, },
+ { .compatible = "qcom,sm7225", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7325", .data = sc7280_domains, },
+ { .compatible = "qcom,sm8150", .data = sm8150_domains, },
+ { .compatible = "qcom,sm8250", .data = sm8250_domains, },
+ { .compatible = "qcom,sm8350", .data = sm8350_domains, },
+ { .compatible = "qcom,sm8450", .data = sm8350_domains, },
+ { .compatible = "qcom,sm8550", .data = sm8550_domains, },
+ { .compatible = "qcom,sm8650", .data = sm8550_domains, },
+ { .compatible = "qcom,sm8750", .data = sm8550_domains, },
+ { .compatible = "qcom,x1e80100", .data = x1e80100_domains, },
+ { .compatible = "qcom,x1p42100", .data = x1e80100_domains, },
+ {},
+};
+
+static void qcom_pdm_stop(struct qcom_pdm_data *data)
+{
+ qcom_pdm_free_domains(data);
+
+ /* The server is removed automatically */
+ qmi_handle_release(&data->handle);
+
+ kfree(data);
+}
+
+static struct qcom_pdm_data *qcom_pdm_start(void)
+{
+ const struct qcom_pdm_domain_data * const *domains;
+ const struct of_device_id *match;
+ struct qcom_pdm_data *data;
+ struct device_node *root;
+ int ret, i;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return ERR_PTR(-ENODEV);
+
+ match = of_match_node(qcom_pdm_domains, root);
+ of_node_put(root);
+ if (!match) {
+ pr_notice("PDM: no support for the platform, userspace daemon might be required.\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ domains = match->data;
+ if (!domains) {
+ pr_debug("PDM: no domains\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&data->services);
+
+ ret = qmi_handle_init(&data->handle, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ NULL, qcom_pdm_msg_handlers);
+ if (ret) {
+ kfree(data);
+ return ERR_PTR(ret);
+ }
+
+ refcount_set(&data->refcnt, 1);
+
+ for (i = 0; domains[i]; i++) {
+ ret = qcom_pdm_add_domain(data, domains[i]);
+ if (ret)
+ goto err_stop;
+ }
+
+ ret = qmi_add_server(&data->handle, SERVREG_LOCATOR_SERVICE,
+ SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
+ if (ret) {
+ pr_err("PDM: error adding server %d\n", ret);
+ goto err_stop;
+ }
+
+ return data;
+
+err_stop:
+ qcom_pdm_stop(data);
+
+ return ERR_PTR(ret);
+}
+
+static int qcom_pdm_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct qcom_pdm_data *data;
+ int ret = 0;
+
+ mutex_lock(&qcom_pdm_mutex);
+
+ if (!__qcom_pdm_data) {
+ data = qcom_pdm_start();
+
+ if (IS_ERR(data))
+ ret = PTR_ERR(data);
+ else
+ __qcom_pdm_data = data;
+ } else {
+ refcount_inc(&__qcom_pdm_data->refcnt);
+ }
+
+ auxiliary_set_drvdata(auxdev, __qcom_pdm_data);
+
+ mutex_unlock(&qcom_pdm_mutex);
+
+ return ret;
+}
+
+static void qcom_pdm_remove(struct auxiliary_device *auxdev)
+{
+ struct qcom_pdm_data *data;
+
+ data = auxiliary_get_drvdata(auxdev);
+ if (!data)
+ return;
+
+ if (refcount_dec_and_mutex_lock(&data->refcnt, &qcom_pdm_mutex)) {
+ __qcom_pdm_data = NULL;
+ qcom_pdm_stop(data);
+ mutex_unlock(&qcom_pdm_mutex);
+ }
+}
+
+static const struct auxiliary_device_id qcom_pdm_table[] = {
+ { .name = "qcom_common.pd-mapper" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, qcom_pdm_table);
+
+static struct auxiliary_driver qcom_pdm_drv = {
+ .name = "qcom-pdm-mapper",
+ .id_table = qcom_pdm_table,
+ .probe = qcom_pdm_probe,
+ .remove = qcom_pdm_remove,
+};
+module_auxiliary_driver(qcom_pdm_drv);
+
+MODULE_DESCRIPTION("Qualcomm Protection Domain Mapper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/qcom_pdr_msg.c b/drivers/soc/qcom/qcom_pdr_msg.c
new file mode 100644
index 000000000000..ca98932140d8
--- /dev/null
+++ b/drivers/soc/qcom/qcom_pdr_msg.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "pdr_internal.h"
+
+static const struct qmi_elem_info servreg_location_entry_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ instance),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data),
+ },
+ {}
+};
+
+const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ service_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_get_domain_list_req_ei);
+
+const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
+ .elem_size = sizeof(struct servreg_location_entry),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list),
+ .ei_array = servreg_location_entry_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_get_domain_list_resp_ei);
+
+const struct qmi_elem_info servreg_register_listener_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_register_listener_req,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_req,
+ service_path),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_register_listener_req_ei);
+
+const struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_register_listener_resp_ei);
+
+const struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_restart_pd_req,
+ service_path),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_restart_pd_req_ei);
+
+const struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_restart_pd_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_restart_pd_resp_ei);
+
+const struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ transaction_id),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_state_updated_ind_ei);
+
+const struct qmi_elem_info servreg_set_ack_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_set_ack_req,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_req,
+ transaction_id),
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_set_ack_req_ei);
+
+const struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_set_ack_resp_ei);
+
+const struct qmi_elem_info servreg_loc_pfr_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_loc_pfr_req, service)
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_loc_pfr_req, reason)
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_loc_pfr_req_ei);
+
+const struct qmi_elem_info servreg_loc_pfr_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof_field(struct servreg_loc_pfr_resp, rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_loc_pfr_resp, rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(servreg_loc_pfr_resp_ei);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm Protection Domain messages data");
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
index c207bb96c523..2e380faf9080 100644
--- a/drivers/soc/qcom/qcom_stats.c
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -11,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#include <linux/soc/qcom/smem.h>
#include <clocksource/arm_arch_timer.h>
@@ -24,6 +27,19 @@
#define ACCUMULATED_OFFSET 0x18
#define CLIENT_VOTES_OFFSET 0x20
+#define DDR_STATS_MAGIC_KEY 0xA1157A75
+#define DDR_STATS_MAX_NUM_MODES 20
+#define DDR_STATS_MAGIC_KEY_ADDR 0x0
+#define DDR_STATS_NUM_MODES_ADDR 0x4
+#define DDR_STATS_ENTRY_START_ADDR 0x8
+
+#define DDR_STATS_CP_IDX(data) FIELD_GET(GENMASK(4, 0), data)
+#define DDR_STATS_LPM_NAME(data) FIELD_GET(GENMASK(7, 0), data)
+#define DDR_STATS_TYPE(data) FIELD_GET(GENMASK(15, 8), data)
+#define DDR_STATS_FREQ(data) FIELD_GET(GENMASK(31, 16), data)
+
+static struct qmp *qcom_stats_qmp;
+
struct subsystem_data {
const char *name;
u32 smem_item;
@@ -35,21 +51,32 @@ static const struct subsystem_data subsystems[] = {
{ "wpss", 605, 13 },
{ "adsp", 606, 2 },
{ "cdsp", 607, 5 },
+ { "cdsp1", 607, 12 },
+ { "gpdsp0", 607, 17 },
+ { "gpdsp1", 607, 18 },
{ "slpi", 608, 3 },
{ "gpu", 609, 0 },
{ "display", 610, 0 },
{ "adsp_island", 613, 2 },
{ "slpi_island", 613, 3 },
+ { "apss", 631, QCOM_SMEM_HOST_ANY },
};
struct stats_config {
size_t stats_offset;
+ size_t ddr_stats_offset;
size_t num_records;
bool appended_stats_avail;
bool dynamic_offset;
bool subsystem_stats_in_smem;
};
+struct ddr_stats_entry {
+ u32 name;
+ u32 count;
+ u64 duration;
+};
+
struct stats_data {
bool appended_stats_avail;
void __iomem *base;
@@ -118,8 +145,101 @@ static int qcom_soc_sleep_stats_show(struct seq_file *s, void *unused)
return 0;
}
+static void qcom_ddr_stats_print(struct seq_file *s, struct ddr_stats_entry *data)
+{
+ u32 cp_idx;
+
+ /*
+	 * DDR statistics have two different types of details encoded:
+	 * (1) DDR LPM Stats
+	 * (2) DDR Frequency Stats
+	 *
+	 * The name field indicates which type of DDR stat this is (bits 8:15),
+	 * along with other details as explained below.
+	 *
+	 * For a DDR LPM stat, the name field is encoded as:
+	 * Bits - Meaning
+	 * 0:7 - DDR LPM name, one of 0xd4, 0xd3, 0x11 or 0xd0.
+	 * 8:15 - 0x0 (indicates it is an LPM stat)
+	 * 16:31 - Unused
+	 *
+	 * For a DDR FREQ stat, the name field is encoded as:
+	 * Bits - Meaning
+	 * 0:4 - DDR Clock plan index (CP IDX)
+	 * 5:7 - Unused
+	 * 8:15 - 0x1 (indicates it is a Freq stat)
+	 * 16:31 - Frequency value in MHz
+ */
+ switch (DDR_STATS_TYPE(data->name)) {
+ case 0:
+ seq_printf(s, "DDR LPM Stat Name:0x%lx\tcount:%u\tDuration (ticks):%llu\n",
+ DDR_STATS_LPM_NAME(data->name), data->count, data->duration);
+ break;
+ case 1:
+ if (!data->count || !DDR_STATS_FREQ(data->name))
+ return;
+
+ cp_idx = DDR_STATS_CP_IDX(data->name);
+ seq_printf(s, "DDR Freq %luMhz:\tCP IDX:%u\tcount:%u\tDuration (ticks):%llu\n",
+ DDR_STATS_FREQ(data->name), cp_idx, data->count, data->duration);
+ break;
+ }
+}
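/*
 * Worked example (hypothetical value): a frequency entry whose name word reads
 * 0x06130103 decodes, per the macros above, as
 *   DDR_STATS_TYPE(name)   = 0x01   -> frequency stat
 *   DDR_STATS_CP_IDX(name) = 0x03   -> clock plan index 3
 *   DDR_STATS_FREQ(name)   = 0x0613 -> 1555 MHz
 * while an LPM entry such as name = 0x000000d4 has type 0x00 and
 * DDR_STATS_LPM_NAME(name) = 0xd4.
 */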
+
+static int qcom_ddr_stats_show(struct seq_file *s, void *d)
+{
+ struct ddr_stats_entry data[DDR_STATS_MAX_NUM_MODES];
+ void __iomem *reg = (void __iomem *)s->private;
+ u32 entry_count;
+ int i, ret;
+
+ entry_count = readl_relaxed(reg + DDR_STATS_NUM_MODES_ADDR);
+ if (entry_count > DDR_STATS_MAX_NUM_MODES)
+ return -EINVAL;
+
+ if (qcom_stats_qmp) {
+ /*
+		 * Recent SoCs (SM8450 onwards) do not have the duration field
+		 * populated from boot onwards for either the DDR LPM Stats
+		 * or the DDR Frequency Stats.
+		 *
+		 * Send a QMP message to the Always On processor, which then
+		 * populates the duration field in the MSG RAM area.
+		 *
+		 * This is sent on every read so the latest data is reported.
+ */
+ ret = qmp_send(qcom_stats_qmp, "{class: ddr, action: freqsync}");
+ if (ret)
+ return ret;
+ }
+
+ reg += DDR_STATS_ENTRY_START_ADDR;
+ memcpy_fromio(data, reg, sizeof(struct ddr_stats_entry) * entry_count);
+
+ for (i = 0; i < entry_count; i++)
+ qcom_ddr_stats_print(s, &data[i]);
+
+ return 0;
+}
+
DEFINE_SHOW_ATTRIBUTE(qcom_soc_sleep_stats);
DEFINE_SHOW_ATTRIBUTE(qcom_subsystem_sleep_stats);
+DEFINE_SHOW_ATTRIBUTE(qcom_ddr_stats);
+
+static void qcom_create_ddr_stat_files(struct dentry *root, void __iomem *reg,
+ const struct stats_config *config)
+{
+ u32 key;
+
+ if (!config->ddr_stats_offset)
+ return;
+
+ key = readl_relaxed(reg + config->ddr_stats_offset + DDR_STATS_MAGIC_KEY_ADDR);
+ if (key == DDR_STATS_MAGIC_KEY)
+ debugfs_create_file("ddr_stats", 0400, root,
+ (__force void *)reg + config->ddr_stats_offset,
+ &qcom_ddr_stats_fops);
+}
static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *reg,
struct stats_data *d,
@@ -203,11 +323,27 @@ static int qcom_stats_probe(struct platform_device *pdev)
for (i = 0; i < config->num_records; i++)
d[i].appended_stats_avail = config->appended_stats_avail;
+ /*
+	 * QMP is used to sync the DDR stats into MSG RAM on recent SoCs (SM8450 onwards).
+	 * Older SoCs do not need a QMP handle as the required stats are already present
+	 * in MSG RAM, provided the DDR_STATS_MAGIC_KEY matches.
+ */
+ qcom_stats_qmp = qmp_get(&pdev->dev);
+ if (IS_ERR(qcom_stats_qmp)) {
+ /* We ignore error if QMP is not defined/needed */
+ if (!of_property_present(pdev->dev.of_node, "qcom,qmp"))
+ qcom_stats_qmp = NULL;
+ else if (PTR_ERR(qcom_stats_qmp) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else
+ return PTR_ERR(qcom_stats_qmp);
+ }
root = debugfs_create_dir("qcom_stats", NULL);
qcom_create_subsystem_stat_files(root, config);
qcom_create_soc_sleep_stat_files(root, reg, d, config);
+ qcom_create_ddr_stat_files(root, reg, config);
platform_set_drvdata(pdev, root);
@@ -216,13 +352,11 @@ static int qcom_stats_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_stats_remove(struct platform_device *pdev)
+static void qcom_stats_remove(struct platform_device *pdev)
{
struct dentry *root = platform_get_drvdata(pdev);
debugfs_remove_recursive(root);
-
- return 0;
}
static const struct stats_config rpm_data = {
@@ -252,6 +386,7 @@ static const struct stats_config rpmh_data_sdm845 = {
static const struct stats_config rpmh_data = {
.stats_offset = 0x48,
+ .ddr_stats_offset = 0xb8,
.num_records = 3,
.appended_stats_avail = false,
.dynamic_offset = false,
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 5c7161b18b72..7660a960fb45 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -304,6 +304,8 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
const void *buf_src;
int encode_tlv = 0;
int rc;
+ u8 val8;
+ u16 val16;
if (!ei_array)
return 0;
@@ -338,7 +340,6 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
break;
case QMI_DATA_LEN:
- memcpy(&data_len_value, buf_src, temp_ei->elem_size);
data_len_sz = temp_ei->elem_size == sizeof(u8) ?
sizeof(u8) : sizeof(u16);
/* Check to avoid out of range buffer access */
@@ -348,8 +349,17 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
__func__);
return -ETOOSMALL;
}
- rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
- 1, data_len_sz);
+ if (data_len_sz == sizeof(u8)) {
+ val8 = *(u8 *)buf_src;
+ data_len_value = (u32)val8;
+ rc = qmi_encode_basic_elem(buf_dst, &val8,
+ 1, data_len_sz);
+ } else {
+ val16 = *(u16 *)buf_src;
+ data_len_value = (u32)le16_to_cpu(val16);
+ rc = qmi_encode_basic_elem(buf_dst, &val16,
+ 1, data_len_sz);
+ }
UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
encoded_bytes, tlv_len,
encode_tlv, rc);
@@ -523,14 +533,23 @@ static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
u32 string_len = 0;
u32 string_len_sz = 0;
const struct qmi_elem_info *temp_ei = ei_array;
+ u8 val8;
+ u16 val16;
if (dec_level == 1) {
string_len = tlv_len;
} else {
string_len_sz = temp_ei->elem_len <= U8_MAX ?
sizeof(u8) : sizeof(u16);
- rc = qmi_decode_basic_elem(&string_len, buf_src,
- 1, string_len_sz);
+ if (string_len_sz == sizeof(u8)) {
+ rc = qmi_decode_basic_elem(&val8, buf_src,
+ 1, string_len_sz);
+ string_len = (u32)val8;
+ } else {
+ rc = qmi_decode_basic_elem(&val16, buf_src,
+ 1, string_len_sz);
+ string_len = (u32)val16;
+ }
decoded_bytes += rc;
}
@@ -604,6 +623,9 @@ static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
u32 decoded_bytes = 0;
const void *buf_src = in_buf;
int rc;
+ u8 val8;
+ u16 val16;
+ u32 val32;
while (decoded_bytes < in_buf_len) {
if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
@@ -642,9 +664,17 @@ static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
if (temp_ei->data_type == QMI_DATA_LEN) {
data_len_sz = temp_ei->elem_size == sizeof(u8) ?
sizeof(u8) : sizeof(u16);
- rc = qmi_decode_basic_elem(&data_len_value, buf_src,
- 1, data_len_sz);
- memcpy(buf_dst, &data_len_value, sizeof(u32));
+ if (data_len_sz == sizeof(u8)) {
+ rc = qmi_decode_basic_elem(&val8, buf_src,
+ 1, data_len_sz);
+ data_len_value = (u32)val8;
+ } else {
+ rc = qmi_decode_basic_elem(&val16, buf_src,
+ 1, data_len_sz);
+ data_len_value = (u32)val16;
+ }
+ val32 = cpu_to_le32(data_len_value);
+ memcpy(buf_dst, &val32, sizeof(u32));
temp_ei = temp_ei + 1;
buf_dst = out_c_struct + temp_ei->offset;
tlv_len -= data_len_sz;
@@ -746,15 +776,15 @@ void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
hdr = msg;
hdr->type = type;
- hdr->txn_id = txn_id;
- hdr->msg_id = msg_id;
- hdr->msg_len = msglen;
+ hdr->txn_id = cpu_to_le16(txn_id);
+ hdr->msg_id = cpu_to_le16(msg_id);
+ hdr->msg_len = cpu_to_le16(msglen);
*len = sizeof(*hdr) + msglen;
return msg;
}
-EXPORT_SYMBOL(qmi_encode_message);
+EXPORT_SYMBOL_GPL(qmi_encode_message);
/**
* qmi_decode_message() - Decode QMI encoded message to C structure
@@ -778,7 +808,7 @@ int qmi_decode_message(const void *buf, size_t len,
return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header),
len - sizeof(struct qmi_header), 1);
}
-EXPORT_SYMBOL(qmi_decode_message);
+EXPORT_SYMBOL_GPL(qmi_decode_message);
/* Common header in all QMI responses */
const struct qmi_elem_info qmi_response_type_v01_ei[] = {
@@ -810,7 +840,7 @@ const struct qmi_elem_info qmi_response_type_v01_ei[] = {
.ei_array = NULL,
},
};
-EXPORT_SYMBOL(qmi_response_type_v01_ei);
+EXPORT_SYMBOL_GPL(qmi_response_type_v01_ei);
MODULE_DESCRIPTION("QMI encoder/decoder helper");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 78d7361fdcf2..6500f863aae5 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -195,8 +195,8 @@ static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc)
* qmi_add_lookup() - register a new lookup with the name service
* @qmi: qmi handle
* @service: service id of the request
- * @instance: instance id of the request
* @version: version number of the request
+ * @instance: instance id of the request
*
* Registering a lookup query with the name server will cause the name server
* to send NEW_SERVER and DEL_SERVER control messages to this socket as
@@ -223,7 +223,7 @@ int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
return 0;
}
-EXPORT_SYMBOL(qmi_add_lookup);
+EXPORT_SYMBOL_GPL(qmi_add_lookup);
static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
@@ -287,7 +287,7 @@ int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
return 0;
}
-EXPORT_SYMBOL(qmi_add_server);
+EXPORT_SYMBOL_GPL(qmi_add_server);
/**
* qmi_txn_init() - allocate transaction id within the given QMI handle
@@ -328,7 +328,7 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
return ret;
}
-EXPORT_SYMBOL(qmi_txn_init);
+EXPORT_SYMBOL_GPL(qmi_txn_init);
/**
* qmi_txn_wait() - wait for a response on a transaction
@@ -359,7 +359,7 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
else
return txn->result;
}
-EXPORT_SYMBOL(qmi_txn_wait);
+EXPORT_SYMBOL_GPL(qmi_txn_wait);
/**
* qmi_txn_cancel() - cancel an ongoing transaction
@@ -375,7 +375,7 @@ void qmi_txn_cancel(struct qmi_txn *txn)
mutex_unlock(&txn->lock);
mutex_unlock(&qmi->txn_lock);
}
-EXPORT_SYMBOL(qmi_txn_cancel);
+EXPORT_SYMBOL_GPL(qmi_txn_cancel);
/**
* qmi_invoke_handler() - find and invoke a handler for a message
@@ -400,7 +400,7 @@ static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
for (handler = qmi->handlers; handler->fn; handler++) {
if (handler->type == hdr->type &&
- handler->msg_id == hdr->msg_id)
+ handler->msg_id == le16_to_cpu(hdr->msg_id))
break;
}
@@ -488,7 +488,7 @@ static void qmi_handle_message(struct qmi_handle *qmi,
/* If this is a response, find the matching transaction handle */
if (hdr->type == QMI_RESPONSE) {
mutex_lock(&qmi->txn_lock);
- txn = idr_find(&qmi->txns, hdr->txn_id);
+ txn = idr_find(&qmi->txns, le16_to_cpu(hdr->txn_id));
/* Ignore unexpected responses */
if (!txn) {
@@ -514,7 +514,7 @@ static void qmi_handle_message(struct qmi_handle *qmi,
} else {
/* Create a txn based on the txn_id of the incoming message */
memset(&tmp_txn, 0, sizeof(tmp_txn));
- tmp_txn.id = hdr->txn_id;
+ tmp_txn.id = le16_to_cpu(hdr->txn_id);
qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len);
}
@@ -676,7 +676,7 @@ err_free_recv_buf:
return ret;
}
-EXPORT_SYMBOL(qmi_handle_init);
+EXPORT_SYMBOL_GPL(qmi_handle_init);
/**
* qmi_handle_release() - release the QMI client handle
@@ -717,7 +717,7 @@ void qmi_handle_release(struct qmi_handle *qmi)
kfree(svc);
}
}
-EXPORT_SYMBOL(qmi_handle_release);
+EXPORT_SYMBOL_GPL(qmi_handle_release);
/**
* qmi_send_message() - send a QMI message
@@ -796,7 +796,7 @@ ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
c_struct);
}
-EXPORT_SYMBOL(qmi_send_request);
+EXPORT_SYMBOL_GPL(qmi_send_request);
/**
* qmi_send_response() - send a response QMI message
@@ -817,7 +817,7 @@ ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
c_struct);
}
-EXPORT_SYMBOL(qmi_send_response);
+EXPORT_SYMBOL_GPL(qmi_send_response);
/**
* qmi_send_indication() - send an indication QMI message
@@ -851,4 +851,4 @@ ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
return rval;
}
-EXPORT_SYMBOL(qmi_send_indication);
+EXPORT_SYMBOL_GPL(qmi_send_indication);
diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c
index e9a0cca07189..15782bed2925 100644
--- a/drivers/soc/qcom/ramp_controller.c
+++ b/drivers/soc/qcom/ramp_controller.c
@@ -229,7 +229,6 @@ static const struct regmap_config qrc_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x68,
- .fast_io = true,
};
static const struct reg_sequence msm8976_cfg_dfs_sid[] = {
@@ -331,8 +330,8 @@ static struct platform_driver qcom_ramp_controller_driver = {
.of_match_table = qcom_ramp_controller_match_table,
.suppress_bind_attrs = true,
},
- .probe = qcom_ramp_controller_probe,
- .remove_new = qcom_ramp_controller_remove,
+ .probe = qcom_ramp_controller_probe,
+ .remove = qcom_ramp_controller_remove,
};
static int __init qcom_ramp_controller_init(void)
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index f83811f51175..1b32469f2789 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -125,7 +125,7 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct class rmtfs_class = {
+static const struct class rmtfs_class = {
.name = "rmtfs",
};
@@ -200,6 +200,15 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
rmtfs_mem->client_id = client_id;
rmtfs_mem->size = rmem->size;
+ /*
+ * If requested, discard the first and last 4k block in order to ensure
+ * that the rmtfs region isn't adjacent to other protected regions.
+ */
+ if (of_property_read_bool(node, "qcom,use-guard-pages")) {
+ rmtfs_mem->addr += SZ_4K;
+ rmtfs_mem->size -= 2 * SZ_4K;
+ }
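	/*
	 * Worked example (hypothetical region): a 4 MiB carveout at
	 * 0x90000000 with qcom,use-guard-pages set becomes addr 0x90001000
	 * and size 0x003fe000, leaving one 4 KiB guard page at each end.
	 */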
+
device_initialize(&rmtfs_mem->dev);
rmtfs_mem->dev.parent = &pdev->dev;
rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
@@ -281,7 +290,7 @@ put_device:
return ret;
}
-static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
+static void qcom_rmtfs_mem_remove(struct platform_device *pdev)
{
struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
struct qcom_scm_vmperm perm;
@@ -296,8 +305,6 @@ static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
put_device(&rmtfs_mem->dev);
-
- return 0;
}
static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
diff --git a/drivers/soc/qcom/rpm-proc.c b/drivers/soc/qcom/rpm-proc.c
index 2995d9b90190..2466d0400c2e 100644
--- a/drivers/soc/qcom/rpm-proc.c
+++ b/drivers/soc/qcom/rpm-proc.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, rpm_proc_of_match);
static struct platform_driver rpm_proc_driver = {
.probe = rpm_proc_probe,
- .remove_new = rpm_proc_remove,
+ .remove = rpm_proc_remove,
.driver = {
.name = "qcom-rpm-proc",
.of_match_table = rpm_proc_of_match,
diff --git a/drivers/soc/qcom/rpm_master_stats.c b/drivers/soc/qcom/rpm_master_stats.c
index 9ca13bcf67d3..c7788337e164 100644
--- a/drivers/soc/qcom/rpm_master_stats.c
+++ b/drivers/soc/qcom/rpm_master_stats.c
@@ -78,7 +78,7 @@ static int master_stats_probe(struct platform_device *pdev)
if (count < 0)
return count;
- data = devm_kzalloc(dev, count * sizeof(*data), GFP_KERNEL);
+ data = devm_kcalloc(dev, count, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -148,10 +148,14 @@ static const struct of_device_id rpm_master_table[] = {
{ .compatible = "qcom,rpm-master-stats" },
{ },
};
+/*
+ * No MODULE_DEVICE_TABLE intentionally: this is a debugging module, meant
+ * to be loaded manually only.
+ */
static struct platform_driver master_stats_driver = {
.probe = master_stats_probe,
- .remove_new = master_stats_remove,
+ .remove = master_stats_remove,
.driver = {
.name = "qcom_rpm_master_stats",
.of_match_table = rpm_master_table,
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a021dc71807b..c6f7d5c9c493 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
@@ -452,13 +453,10 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
trace_rpmh_tx_done(drv, i, req);
- /*
- * If wake tcs was re-purposed for sending active
- * votes, clear AMC trigger & enable modes and
+ /* Clear AMC trigger & enable modes and
* disable interrupt for this TCS
*/
- if (!drv->tcs[ACTIVE_TCS].num_tcs)
- __tcs_set_trigger(drv, i, false);
+ __tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
@@ -557,7 +555,7 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
for (k = 0; k < msg->num_cmds; k++) {
- if (addr == msg->cmds[k].addr)
+ if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
return -EBUSY;
}
}
@@ -645,13 +643,14 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
int tcs_id;
- unsigned long flags;
+
+ might_sleep();
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock_irqsave(&drv->lock, flags);
+ spin_lock_irq(&drv->lock);
/* Wait forever for a free tcs. It better be there eventually! */
wait_event_lock_irq(drv->tcs_wait,
@@ -669,7 +668,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
enable_tcs_irq(drv, tcs_id, true);
}
- spin_unlock_irqrestore(&drv->lock, flags);
+ spin_unlock_irq(&drv->lock);
/*
* These two can be done after the lock is released because:
@@ -1043,12 +1042,9 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
* do. To avoid adding this check to our children we'll do it now.
*/
ret = cmd_db_ready();
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Command DB not available (%d)\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Command DB not available\n");
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -1073,7 +1069,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
drv->ver.minor >>= MINOR_VER_SHIFT;
- if (drv->ver.major == 3)
+ if (drv->ver.major >= 3)
drv->regs = rpmh_rsc_reg_offset_ver_3_0;
else
drv->regs = rpmh_rsc_reg_offset_ver_2_7;
@@ -1154,7 +1150,7 @@ static int __init rpmh_driver_init(void)
{
return platform_driver_register(&rpmh_driver);
}
-arch_initcall(rpmh_driver_init);
+core_initcall(rpmh_driver_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 08e09642d7f5..8903ed956312 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -183,7 +183,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
}
if (state == RPMH_ACTIVE_ONLY_STATE) {
- WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
/* Clean up our call by spoofing tx_done */
@@ -239,7 +238,7 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state,
return __rpmh_write(dev, state, rpm_msg);
}
-EXPORT_SYMBOL(rpmh_write_async);
+EXPORT_SYMBOL_GPL(rpmh_write_async);
/**
* rpmh_write: Write a set of RPMH commands and block until response
@@ -270,7 +269,7 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
WARN_ON(!ret);
return (ret > 0) ? 0 : -ETIMEDOUT;
}
-EXPORT_SYMBOL(rpmh_write);
+EXPORT_SYMBOL_GPL(rpmh_write);
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
@@ -395,7 +394,7 @@ exit:
return ret;
}
-EXPORT_SYMBOL(rpmh_write_batch);
+EXPORT_SYMBOL_GPL(rpmh_write_batch);
static int is_req_valid(struct cache_req *req)
{
@@ -500,4 +499,4 @@ void rpmh_invalidate(const struct device *dev)
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
-EXPORT_SYMBOL(rpmh_invalidate);
+EXPORT_SYMBOL_GPL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 13d8c52330d0..f2b3e02abdf1 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -19,7 +19,6 @@
/**
* struct qcom_smd_rpm - state of the rpm device driver
* @rpm_channel: reference to the smd channel
- * @icc: interconnect proxy device
* @dev: rpm device
* @ack: completion for acks
* @lock: mutual exclusion around the send/complete pair
@@ -27,7 +26,6 @@
*/
struct qcom_smd_rpm {
struct rpmsg_endpoint *rpm_channel;
- struct platform_device *icc;
struct device *dev;
struct completion ack;
@@ -144,7 +142,7 @@ out:
mutex_unlock(&rpm->lock);
return ret;
}
-EXPORT_SYMBOL(qcom_rpm_smd_write);
+EXPORT_SYMBOL_GPL(qcom_rpm_smd_write);
static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
void *data,
@@ -197,10 +195,6 @@ static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
{
struct qcom_smd_rpm *rpm;
- int ret;
-
- if (!rpdev->dev.of_node)
- return -EINVAL;
rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
if (!rpm)
@@ -213,38 +207,52 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
rpm->rpm_channel = rpdev->ept;
dev_set_drvdata(&rpdev->dev, rpm);
- rpm->icc = platform_device_register_data(&rpdev->dev, "icc_smd_rpm", -1,
- NULL, 0);
- if (IS_ERR(rpm->icc))
- return PTR_ERR(rpm->icc);
-
- ret = of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
- if (ret)
- platform_device_unregister(rpm->icc);
-
- return ret;
+ return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
}
static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
{
- struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
-
- platform_device_unregister(rpm->icc);
of_platform_depopulate(&rpdev->dev);
}
-static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = {
- { .name = "rpm_requests", },
- { /* sentinel */ }
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+ { .compatible = "qcom,glink-smd-rpm" },
+ { .compatible = "qcom,smd-rpm" },
+ /*
+	 * Don't add any more compatibles to the list; the two previous entries
+	 * should match all defined devices.
+ */
+ { .compatible = "qcom,rpm-apq8084" },
+ { .compatible = "qcom,rpm-ipq6018" },
+ { .compatible = "qcom,rpm-ipq9574" },
+ { .compatible = "qcom,rpm-msm8226" },
+ { .compatible = "qcom,rpm-msm8909" },
+ { .compatible = "qcom,rpm-msm8916" },
+ { .compatible = "qcom,rpm-msm8936" },
+ { .compatible = "qcom,rpm-msm8953" },
+ { .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8976" },
+ { .compatible = "qcom,rpm-msm8994" },
+ { .compatible = "qcom,rpm-msm8996" },
+ { .compatible = "qcom,rpm-msm8998" },
+ { .compatible = "qcom,rpm-sdm660" },
+ { .compatible = "qcom,rpm-sm6115" },
+ { .compatible = "qcom,rpm-sm6125" },
+ { .compatible = "qcom,rpm-sm6375" },
+ { .compatible = "qcom,rpm-qcm2290" },
+ { .compatible = "qcom,rpm-qcs404" },
+ {}
};
-MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table);
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
static struct rpmsg_driver qcom_smd_rpm_driver = {
.probe = qcom_smd_rpm_probe,
.remove = qcom_smd_rpm_remove,
.callback = qcom_smd_rpm_callback,
- .id_table = qcom_smd_rpm_id_table,
- .drv.name = "qcom_smd_rpm",
+ .drv = {
+ .name = "qcom_smd_rpm",
+ .of_match_table = qcom_smd_rpm_of_match,
+ },
};
static int __init qcom_smd_rpm_init(void)
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index d4a89d2bb43b..fef840b54574 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -86,7 +86,7 @@
#define SMEM_GLOBAL_HOST 0xfffe
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 20
+#define SMEM_HOST_COUNT 25
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
@@ -285,7 +285,7 @@ struct qcom_smem {
struct smem_partition partitions[SMEM_HOST_COUNT];
unsigned num_regions;
- struct smem_region regions[];
+ struct smem_region regions[] __counted_by(num_regions);
};
static void *
@@ -353,12 +353,42 @@ static void *cached_entry_to_item(struct smem_private_entry *e)
return p - le32_to_cpu(e->size);
}
-/* Pointer to the one and only smem handle */
-static struct qcom_smem *__smem;
+/*
+ * Pointer to the one and only smem handle.
+ * Init to -EPROBE_DEFER to signal SMEM still has to be probed.
+ * Can be set to -ENODEV if SMEM is not initialized by SBL.
+ */
+static struct qcom_smem *__smem = INIT_ERR_PTR(-EPROBE_DEFER);
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
+/* The qcom hwspinlock id is always the smem host id plus one */
+#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1)
+
+/**
+ * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
+ * @host: remote processor id
+ *
+ * Busts the hwspin_lock for the given smem host id. This helper is intended
+ * for remoteproc drivers that manage remoteprocs with an equivalent smem
+ * driver instance in the remote firmware. Drivers can force a release of the
+ * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
+ *
+ * Context: Process context.
+ *
+ * Returns: 0 on success, otherwise negative errno.
+ */
+int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
+{
+ /* This function is for remote procs, so ignore SMEM_HOST_APPS */
+ if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
+ return -EINVAL;
+
+ return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
+}
+EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
+
/**
* qcom_smem_is_available() - Check if SMEM is available
*
@@ -368,7 +398,7 @@ bool qcom_smem_is_available(void)
{
return !!__smem;
}
-EXPORT_SYMBOL(qcom_smem_is_available);
+EXPORT_SYMBOL_GPL(qcom_smem_is_available);
static int qcom_smem_alloc_private(struct qcom_smem *smem,
struct smem_partition *part,
@@ -473,6 +503,8 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
*
* Allocate space for a given smem item of size @size, given that the item is
* not yet allocated.
+ *
+ * Return: 0 on success, negative errno on failure.
*/
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
@@ -480,8 +512,8 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
unsigned long flags;
int ret;
- if (!__smem)
- return -EPROBE_DEFER;
+ if (IS_ERR(__smem))
+ return PTR_ERR(__smem);
if (item < SMEM_ITEM_LAST_FIXED) {
dev_err(__smem->dev,
@@ -489,7 +521,7 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
return -EINVAL;
}
- if (WARN_ON(item >= __smem->item_count))
+ if (item >= __smem->item_count)
return -EINVAL;
ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
@@ -651,26 +683,20 @@ invalid_canary:
*
* Looks up smem item and returns pointer to it. Size of smem
* item is returned in @size.
+ *
+ * Return: a pointer to an SMEM item on success, ERR_PTR() on failure.
*/
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
struct smem_partition *part;
- unsigned long flags;
- int ret;
- void *ptr = ERR_PTR(-EPROBE_DEFER);
+ void *ptr;
- if (!__smem)
- return ptr;
+ if (IS_ERR(__smem))
+ return __smem;
- if (WARN_ON(item >= __smem->item_count))
+ if (item >= __smem->item_count)
return ERR_PTR(-EINVAL);
- ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
- HWSPINLOCK_TIMEOUT,
- &flags);
- if (ret)
- return ERR_PTR(ret);
-
if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
part = &__smem->partitions[host];
ptr = qcom_smem_get_private(__smem, part, item, size);
@@ -681,10 +707,7 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
ptr = qcom_smem_get_global(__smem, item, size);
}
- hwspin_unlock_irqrestore(__smem->hwlock, &flags);
-
return ptr;
-
}
EXPORT_SYMBOL_GPL(qcom_smem_get);
@@ -694,6 +717,8 @@ EXPORT_SYMBOL_GPL(qcom_smem_get);
*
* To be used by smem clients as a quick way to determine if any new
* allocations have been made.
+ *
+ * Return: number of available bytes on success, negative errno on failure.
*/
int qcom_smem_get_free_space(unsigned host)
{
@@ -702,8 +727,8 @@ int qcom_smem_get_free_space(unsigned host)
struct smem_header *header;
unsigned ret;
- if (!__smem)
- return -EPROBE_DEFER;
+ if (IS_ERR(__smem))
+ return PTR_ERR(__smem);
if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
part = &__smem->partitions[host];
@@ -743,7 +768,7 @@ static bool addr_in_range(void __iomem *base, size_t size, void *addr)
* with an smem item pointer (previously returned by qcom_smem_get()
* @p: the virtual address to convert
*
- * Returns 0 if the pointer provided is not within any smem region.
+ * Return: physical address of the SMEM item (if found), 0 otherwise
*/
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
@@ -806,6 +831,39 @@ int qcom_smem_get_soc_id(u32 *id)
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
+/**
+ * qcom_smem_get_feature_code() - return the feature code
+ * @code: On success, return the feature code here.
+ *
+ * Look up the feature code identifier from SMEM and return it.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qcom_smem_get_feature_code(u32 *code)
+{
+ struct socinfo *info;
+ u32 raw_code;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ /* This only makes sense for socinfo >= 16 */
+ if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16))
+ return -EOPNOTSUPP;
+
+ raw_code = __le32_to_cpu(info->feature_code);
+
+ /* Ensure the value makes sense */
+ if (raw_code > SOCINFO_FC_INT_MAX)
+ raw_code = SOCINFO_FC_UNKNOWN;
+
+ *code = raw_code;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code);
+
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
struct smem_header *header;
@@ -844,7 +902,7 @@ static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
if (IS_ERR_OR_NULL(ptable))
return SMEM_ITEM_COUNT;
- info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
return SMEM_ITEM_COUNT;
@@ -1127,18 +1185,16 @@ static int qcom_smem_probe(struct platform_device *pdev)
header = smem->regions[0].virt_base;
if (le32_to_cpu(header->initialized) != 1 ||
le32_to_cpu(header->reserved)) {
- dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
- return -EINVAL;
+ __smem = ERR_PTR(-ENODEV);
+ return dev_err_probe(&pdev->dev, PTR_ERR(__smem), "SMEM is not initialized by SBL\n");
}
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
- if (hwlock_id < 0) {
- if (hwlock_id != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to retrieve hwlock\n");
- return hwlock_id;
- }
+ if (hwlock_id < 0)
+ return dev_err_probe(&pdev->dev, hwlock_id,
+ "failed to retrieve hwlock\n");
- smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ smem->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, hwlock_id);
if (!smem->hwlock)
return -ENXIO;
@@ -1187,14 +1243,11 @@ static int qcom_smem_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_smem_remove(struct platform_device *pdev)
+static void qcom_smem_remove(struct platform_device *pdev)
{
platform_device_unregister(__smem->socinfo);
- hwspin_lock_free(__smem->hwlock);
__smem = NULL;
-
- return 0;
}
static const struct of_device_id qcom_smem_of_match[] = {
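A hedged sketch of callers for the two new smem entry points above; the host id, the printouts, and the assumption that both functions are declared in <linux/soc/qcom/smem.h> are illustrative:

#include <linux/printk.h>
#include <linux/soc/qcom/smem.h>

static void example_recover_remote(unsigned int host_id)
{
	/* Force-release the smem hwspinlock if the remote proc died holding it */
	if (qcom_smem_bust_hwspin_lock_by_host(host_id))
		pr_warn("smem hwspinlock for host %u not busted\n", host_id);
}

static void example_log_feature_code(void)
{
	u32 fc;

	/* Only socinfo format 0.16 and newer carries a feature code */
	if (!qcom_smem_get_feature_code(&fc))
		pr_info("SoC feature code: %u\n", fc);
}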
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index e848cc9a3cf8..cc5be8019b6a 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015, Sony Mobile Communications Inc.
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -60,20 +61,15 @@ static struct qcom_smem_state *of_node_to_state(struct device_node *np)
{
struct qcom_smem_state *state;
- mutex_lock(&list_lock);
+ guard(mutex)(&list_lock);
list_for_each_entry(state, &smem_states, list) {
if (state->of_node == np) {
kref_get(&state->refcount);
- goto unlock;
+ return state;
}
}
- state = ERR_PTR(-EPROBE_DEFER);
-
-unlock:
- mutex_unlock(&list_lock);
-
- return state;
+ return ERR_PTR(-EPROBE_DEFER);
}
/**
@@ -116,7 +112,8 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
if (args.args_count != 1) {
dev_err(dev, "invalid #qcom,smem-state-cells\n");
- return ERR_PTR(-EINVAL);
+ state = ERR_PTR(-EINVAL);
+ goto put;
}
state = of_node_to_state(args.np);
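A minimal, self-contained sketch of the guard(mutex)() pattern used in of_node_to_state() above; the list and lookup are placeholders:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static LIST_HEAD(example_list);

struct example_item {
	struct list_head node;
	int id;
};

static struct example_item *example_find(int id)
{
	struct example_item *item;

	/* The mutex is dropped automatically when the scope is left */
	guard(mutex)(&example_lock);

	list_for_each_entry(item, &example_list, node)
		if (item->id == id)
			return item;

	return NULL;
}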
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index e9c8030d50ee..cb515c2340c1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
+#include <linux/seq_file.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
@@ -58,8 +59,8 @@
* @valid_entries: number of allocated entries
* @flags:
* @entries: individual communication entries
- * @name: name of the entry
- * @value: content of the entry
+ * @entries.name: name of the entry
+ * @entries.value: content of the entry
*/
struct smp2p_smem_item {
u32 magic;
@@ -160,6 +161,9 @@ struct qcom_smp2p {
struct list_head outbound;
};
+#define CREATE_TRACE_POINTS
+#include "trace-smp2p.h"
+
static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
/* Make sure any updated data is written before the kick */
@@ -191,6 +195,7 @@ static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
struct smp2p_smem_item *out = smp2p->out;
u32 val;
+ trace_smp2p_ssr_ack(smp2p->dev);
smp2p->ssr_ack = !smp2p->ssr_ack;
val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
@@ -213,6 +218,7 @@ static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
smp2p->ssr_ack_enabled = true;
smp2p->negotiation_done = true;
+ trace_smp2p_negotiate(smp2p->dev, out->features);
}
}
@@ -251,6 +257,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
status = val ^ entry->last_value;
entry->last_value = val;
+ trace_smp2p_notify_in(entry, status, val);
+
/* No changes of this entry? */
if (!status)
continue;
@@ -275,6 +283,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
*
* Handle notifications from the remote side to handle newly allocated entries
* or any changes to the state bits of existing entries.
+ *
+ * Return: %IRQ_HANDLED
*/
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
@@ -351,11 +361,19 @@ static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
return 0;
}
+static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+
+ seq_printf(p, "%8s", dev_name(entry->smp2p->dev));
+}
+
static struct irq_chip smp2p_irq_chip = {
.name = "smp2p",
.irq_mask = smp2p_mask_irq,
.irq_unmask = smp2p_unmask_irq,
.irq_set_type = smp2p_set_irq_type,
+ .irq_print_chip = smp2p_irq_print_chip,
};
static int smp2p_irq_map(struct irq_domain *d,
@@ -381,7 +399,7 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
struct smp2p_entry *entry,
struct device_node *node)
{
- entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+ entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smp2p_irq_ops, entry);
if (!entry->domain) {
dev_err(smp2p->dev, "failed to add irq_domain\n");
return -ENOMEM;
@@ -404,6 +422,8 @@ static int smp2p_update_bits(void *data, u32 mask, u32 value)
writel(val, entry->value);
spin_unlock_irqrestore(&entry->lock, flags);
+ trace_smp2p_update_bits(entry, orig, val);
+
if (val != orig)
qcom_smp2p_kick(entry->smp2p);
@@ -447,12 +467,9 @@ static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
int ret;
ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
- if (ret < 0 && ret != -EEXIST) {
- if (ret != -EPROBE_DEFER)
- dev_err(smp2p->dev,
- "unable to allocate local smp2p item\n");
- return ret;
- }
+ if (ret < 0 && ret != -EEXIST)
+ return dev_err_probe(smp2p->dev, ret,
+ "unable to allocate local smp2p item\n");
out = qcom_smem_get(pid, smem_id, NULL);
if (IS_ERR(out)) {
@@ -519,7 +536,6 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
static int qcom_smp2p_probe(struct platform_device *pdev)
{
struct smp2p_entry *entry;
- struct device_node *node;
struct qcom_smp2p *smp2p;
const char *key;
int irq;
@@ -559,7 +575,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
smp2p->mbox_client.knows_txdone = true;
smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
if (IS_ERR(smp2p->mbox_chan)) {
- if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+ if (PTR_ERR(smp2p->mbox_chan) != -ENOENT)
return PTR_ERR(smp2p->mbox_chan);
smp2p->mbox_chan = NULL;
@@ -573,11 +589,10 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
if (ret < 0)
goto release_mbox;
- for_each_available_child_of_node(pdev->dev.of_node, node) {
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, node) {
entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
if (!entry) {
ret = -ENOMEM;
- of_node_put(node);
goto unwind_interfaces;
}
@@ -585,25 +600,19 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
spin_lock_init(&entry->lock);
ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto unwind_interfaces;
- }
if (of_property_read_bool(node, "interrupt-controller")) {
ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto unwind_interfaces;
- }
list_add(&entry->node, &smp2p->inbound);
} else {
ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto unwind_interfaces;
- }
list_add(&entry->node, &smp2p->outbound);
}
@@ -615,7 +624,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, irq,
NULL, qcom_smp2p_intr,
IRQF_ONESHOT,
- "smp2p", (void *)smp2p);
+ NULL, (void *)smp2p);
if (ret) {
dev_err(&pdev->dev, "failed to request interrupt\n");
goto unwind_interfaces;
@@ -660,7 +669,7 @@ report_read_failure:
return -EINVAL;
}
-static int qcom_smp2p_remove(struct platform_device *pdev)
+static void qcom_smp2p_remove(struct platform_device *pdev)
{
struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
struct smp2p_entry *entry;
@@ -676,8 +685,6 @@ static int qcom_smp2p_remove(struct platform_device *pdev)
mbox_free_channel(smp2p->mbox_chan);
smp2p->out->valid_entries = 0;
-
- return 0;
}
static const struct of_device_id qcom_smp2p_of_match[] = {
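A sketch of the scoped child iterator the probe hunk switches to; parse_one() is a hypothetical stand-in for the smp2p entry setup:

#include <linux/of.h>

/* Hypothetical per-child parser standing in for the entry setup */
static int parse_one(struct device_node *np)
{
	return 0;
}

static int example_parse_children(struct device_node *parent)
{
	for_each_available_child_of_node_scoped(parent, child) {
		int ret = parse_one(child);

		/* The child reference is dropped automatically on this return */
		if (ret)
			return ret;
	}

	return 0;
}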
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index c58cfff64856..021e9d1f61dc 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -5,6 +5,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
@@ -71,6 +72,7 @@ struct smsm_host;
* @lock: spinlock for read-modify-write of the outgoing state
* @entries: context for each of the entries
* @hosts: context for each of the hosts
+ * @mbox_client: mailbox client handle
*/
struct qcom_smsm {
struct device *dev;
@@ -88,6 +90,8 @@ struct qcom_smsm {
struct smsm_entry *entries;
struct smsm_host *hosts;
+
+ struct mbox_client mbox_client;
};
/**
@@ -120,11 +124,14 @@ struct smsm_entry {
* @ipc_regmap: regmap for outgoing interrupt
* @ipc_offset: offset in @ipc_regmap for outgoing interrupt
* @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
+ * @mbox_chan: apcs ipc mailbox channel handle
*/
struct smsm_host {
struct regmap *ipc_regmap;
int ipc_offset;
int ipc_bit;
+
+ struct mbox_chan *mbox_chan;
};
/**
@@ -172,7 +179,13 @@ static int smsm_update_bits(void *data, u32 mask, u32 value)
hostp = &smsm->hosts[host];
val = readl(smsm->subscription + host);
- if (val & changes && hostp->ipc_regmap) {
+ if (!(val & changes))
+ continue;
+
+ if (hostp->mbox_chan) {
+ mbox_send_message(hostp->mbox_chan, NULL);
+ mbox_client_txdone(hostp->mbox_chan, 0);
+ } else if (hostp->ipc_regmap) {
regmap_write(hostp->ipc_regmap,
hostp->ipc_offset,
BIT(hostp->ipc_bit));
@@ -353,6 +366,28 @@ static const struct irq_domain_ops smsm_irq_ops = {
};
/**
+ * smsm_parse_mbox() - requests an mbox channel
+ * @smsm: smsm driver context
+ * @host_id: index of the remote host to be resolved
+ *
+ * Requests the desired channel using the mbox interface, which is needed for
+ * sending the outgoing interrupts to a remote host identified by @host_id.
+ */
+static int smsm_parse_mbox(struct qcom_smsm *smsm, unsigned int host_id)
+{
+ struct smsm_host *host = &smsm->hosts[host_id];
+ int ret = 0;
+
+ host->mbox_chan = mbox_request_channel(&smsm->mbox_client, host_id);
+ if (IS_ERR(host->mbox_chan)) {
+ ret = PTR_ERR(host->mbox_chan);
+ host->mbox_chan = NULL;
+ }
+
+ return ret;
+}
+
+/**
* smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
* @smsm: smsm driver context
* @host_id: index of the remote host to be resolved
@@ -421,7 +456,7 @@ static int smsm_inbound_entry(struct qcom_smsm *smsm,
return ret;
}
- entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
+ entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smsm_irq_ops, entry);
if (!entry->domain) {
dev_err(smsm->dev, "failed to add irq_domain\n");
return -ENOMEM;
@@ -521,8 +556,16 @@ static int qcom_smsm_probe(struct platform_device *pdev)
"qcom,local-host",
&smsm->local_host);
+ smsm->mbox_client.dev = &pdev->dev;
+ smsm->mbox_client.knows_txdone = true;
+
/* Parse the host properties */
for (id = 0; id < smsm->num_hosts; id++) {
+ /* Try using mbox interface first, otherwise fall back to syscon */
+ ret = smsm_parse_mbox(smsm, id);
+ if (!ret)
+ continue;
+
ret = smsm_parse_ipc(smsm, id);
if (ret < 0)
goto out_put;
@@ -609,11 +652,14 @@ unwind_interfaces:
qcom_smem_state_unregister(smsm->state);
out_put:
+ for (id = 0; id < smsm->num_hosts; id++)
+ mbox_free_channel(smsm->hosts[id].mbox_chan);
+
of_node_put(local_node);
return ret;
}
-static int qcom_smsm_remove(struct platform_device *pdev)
+static void qcom_smsm_remove(struct platform_device *pdev)
{
struct qcom_smsm *smsm = platform_get_drvdata(pdev);
unsigned id;
@@ -622,9 +668,10 @@ static int qcom_smsm_remove(struct platform_device *pdev)
if (smsm->entries[id].domain)
irq_domain_remove(smsm->entries[id].domain);
- qcom_smem_state_unregister(smsm->state);
+ for (id = 0; id < smsm->num_hosts; id++)
+ mbox_free_channel(smsm->hosts[id].mbox_chan);
- return 0;
+ qcom_smem_state_unregister(smsm->state);
}
static const struct of_device_id qcom_smsm_of_match[] = {
@@ -636,8 +683,8 @@ MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
static struct platform_driver qcom_smsm_driver = {
.probe = qcom_smsm_probe,
.remove = qcom_smsm_remove,
- .driver = {
- .name = "qcom-smsm",
+ .driver = {
+ .name = "qcom-smsm",
.of_match_table = qcom_smsm_of_match,
},
};
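A hedged sketch of the mailbox-first kick the smsm hunks adopt; the channel index and the immediate mbox_free_channel() are illustrative, the driver itself keeps the channel for later kicks:

#include <linux/err.h>
#include <linux/mailbox_client.h>

/* cl->dev must already point at the requesting device */
static int example_kick(struct mbox_client *cl, int host_index)
{
	struct mbox_chan *chan;

	cl->knows_txdone = true;

	chan = mbox_request_channel(cl, host_index);
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* caller falls back to the qcom,ipc syscon */

	/* APCS IPC doorbells carry no payload, so a NULL message is enough */
	mbox_send_message(chan, NULL);
	mbox_client_txdone(chan, 0);

	mbox_free_channel(chan);
	return 0;
}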
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 497cfb720fcb..003a2304d535 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -17,18 +17,10 @@
#include <linux/sys_soc.h>
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <dt-bindings/arm/qcom,ids.h>
-/*
- * SoC version type with major number in the upper 16 bits and minor
- * number in the lower 16 bits.
- */
-#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
-#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
-#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
-
/* Helper macros to create soc_id table */
#define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id)
#define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name)
@@ -45,26 +37,92 @@
*/
#define SMEM_IMAGE_TABLE_BOOT_INDEX 0
#define SMEM_IMAGE_TABLE_TZ_INDEX 1
+#define SMEM_IMAGE_TABLE_TZSECAPP_INDEX 2
#define SMEM_IMAGE_TABLE_RPM_INDEX 3
+#define SMEM_IMAGE_TABLE_SDI_INDEX 4
+#define SMEM_IMAGE_TABLE_HYP_INDEX 5
+#define SMEM_IMAGE_TABLE_ADSP1_INDEX 6
+#define SMEM_IMAGE_TABLE_ADSP2_INDEX 7
+#define SMEM_IMAGE_TABLE_CDSP2_INDEX 8
+#define SMEM_IMAGE_TABLE_APPSBL_INDEX 9
#define SMEM_IMAGE_TABLE_APPS_INDEX 10
#define SMEM_IMAGE_TABLE_MPSS_INDEX 11
#define SMEM_IMAGE_TABLE_ADSP_INDEX 12
#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
#define SMEM_IMAGE_TABLE_VIDEO_INDEX 14
+#define SMEM_IMAGE_TABLE_DSPS_INDEX 15
+#define SMEM_IMAGE_TABLE_CDSP_INDEX 16
+#define SMEM_IMAGE_TABLE_NPU_INDEX 17
+#define SMEM_IMAGE_TABLE_WPSS_INDEX 18
+#define SMEM_IMAGE_TABLE_CDSP1_INDEX 19
+#define SMEM_IMAGE_TABLE_GPDSP_INDEX 20
+#define SMEM_IMAGE_TABLE_GPDSP1_INDEX 21
+#define SMEM_IMAGE_TABLE_SENSORPD_INDEX 22
+#define SMEM_IMAGE_TABLE_AUDIOPD_INDEX 23
+#define SMEM_IMAGE_TABLE_OEMPD_INDEX 24
+#define SMEM_IMAGE_TABLE_CHARGERPD_INDEX 25
+#define SMEM_IMAGE_TABLE_OISPD_INDEX 26
+#define SMEM_IMAGE_TABLE_SOCCP_INDEX 27
+#define SMEM_IMAGE_TABLE_TME_INDEX 28
+#define SMEM_IMAGE_TABLE_GEARVM_INDEX 29
+#define SMEM_IMAGE_TABLE_UEFI_INDEX 30
+#define SMEM_IMAGE_TABLE_CDSP3_INDEX 31
+#define SMEM_IMAGE_TABLE_AUDIOPD_ADSP1_INDEX 32
+#define SMEM_IMAGE_TABLE_AUDIOPD_ADSP2_INDEX 33
+#define SMEM_IMAGE_TABLE_DCP_INDEX 34
+#define SMEM_IMAGE_TABLE_OOBS_INDEX 35
+#define SMEM_IMAGE_TABLE_OOBNS_INDEX 36
+#define SMEM_IMAGE_TABLE_DEVCFG_INDEX 37
+#define SMEM_IMAGE_TABLE_BTPD_INDEX 38
+#define SMEM_IMAGE_TABLE_QECP_INDEX 39
+
#define SMEM_IMAGE_VERSION_TABLE 469
+#define SMEM_IMAGE_VERSION_TABLE_2 667
/*
* SMEM Image table names
*/
static const char *const socinfo_image_names[] = {
+ [SMEM_IMAGE_TABLE_ADSP1_INDEX] = "adsp1",
+ [SMEM_IMAGE_TABLE_ADSP2_INDEX] = "adsp2",
[SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp",
+ [SMEM_IMAGE_TABLE_APPSBL_INDEX] = "appsbl",
[SMEM_IMAGE_TABLE_APPS_INDEX] = "apps",
+ [SMEM_IMAGE_TABLE_AUDIOPD_INDEX] = "audiopd",
+ [SMEM_IMAGE_TABLE_AUDIOPD_ADSP1_INDEX] = "audiopd_adsp1",
+ [SMEM_IMAGE_TABLE_AUDIOPD_ADSP2_INDEX] = "audiopd_adsp2",
[SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot",
+ [SMEM_IMAGE_TABLE_BTPD_INDEX] = "btpd",
+ [SMEM_IMAGE_TABLE_CDSP1_INDEX] = "cdsp1",
+ [SMEM_IMAGE_TABLE_CDSP2_INDEX] = "cdsp2",
+ [SMEM_IMAGE_TABLE_CDSP3_INDEX] = "cdsp3",
+ [SMEM_IMAGE_TABLE_CDSP_INDEX] = "cdsp",
+ [SMEM_IMAGE_TABLE_CHARGERPD_INDEX] = "chargerpd",
[SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss",
+ [SMEM_IMAGE_TABLE_DCP_INDEX] = "dcp",
+ [SMEM_IMAGE_TABLE_DEVCFG_INDEX] = "devcfg",
+ [SMEM_IMAGE_TABLE_DSPS_INDEX] = "dsps",
+ [SMEM_IMAGE_TABLE_GEARVM_INDEX] = "gearvm",
+ [SMEM_IMAGE_TABLE_GPDSP1_INDEX] = "gpdsp1",
+ [SMEM_IMAGE_TABLE_GPDSP_INDEX] = "gpdsp",
+ [SMEM_IMAGE_TABLE_HYP_INDEX] = "hyp",
[SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss",
+ [SMEM_IMAGE_TABLE_NPU_INDEX] = "npu",
+ [SMEM_IMAGE_TABLE_OEMPD_INDEX] = "oempd",
+ [SMEM_IMAGE_TABLE_OISPD_INDEX] = "oispd",
+ [SMEM_IMAGE_TABLE_OOBNS_INDEX] = "oobns",
+ [SMEM_IMAGE_TABLE_OOBS_INDEX] = "oobs",
+ [SMEM_IMAGE_TABLE_QECP_INDEX] = "qecp",
[SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm",
+ [SMEM_IMAGE_TABLE_SDI_INDEX] = "sdi",
+ [SMEM_IMAGE_TABLE_SENSORPD_INDEX] = "sensorpd",
+ [SMEM_IMAGE_TABLE_SOCCP_INDEX] = "soccp",
+ [SMEM_IMAGE_TABLE_TME_INDEX] = "tme",
[SMEM_IMAGE_TABLE_TZ_INDEX] = "tz",
+ [SMEM_IMAGE_TABLE_TZSECAPP_INDEX] = "tzsecapp",
+ [SMEM_IMAGE_TABLE_UEFI_INDEX] = "uefi",
[SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video",
+ [SMEM_IMAGE_TABLE_WPSS_INDEX] = "wpss",
};
static const char *const pmic_models[] = {
@@ -93,7 +151,7 @@ static const char *const pmic_models[] = {
[22] = "PM8821",
[23] = "PM8038",
[24] = "PM8005/PM8922",
- [25] = "PM8917",
+ [25] = "PM8917/PM8937",
[26] = "PM660L",
[27] = "PM660",
[30] = "PM8150",
@@ -114,9 +172,22 @@ static const char *const pmic_models[] = {
[50] = "PM8350B",
[51] = "PMR735A",
[52] = "PMR735B",
- [55] = "PM2250",
+ [54] = "PM6350",
+ [55] = "PM4125",
[58] = "PM8450",
[65] = "PM8010",
+ [69] = "PM8550VS",
+ [70] = "PM8550VE",
+ [71] = "PM8550B",
+ [72] = "PMR735D",
+ [73] = "PM8550",
+ [74] = "PMK8550",
+ [78] = "PMM8650AU",
+ [79] = "PMM8650AU_PSAIL",
+ [80] = "PM7550",
+ [82] = "PMC8380",
+ [83] = "SMB2360",
+ [91] = "PMIV0108",
};
struct socinfo_params {
@@ -142,6 +213,7 @@ struct socinfo_params {
u32 num_func_clusters;
u32 boot_cluster;
u32 boot_core;
+ u32 raw_package_type;
};
struct smem_image_version {
@@ -331,6 +403,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SDA630) },
{ qcom_board_id(MSM8905) },
{ qcom_board_id(SDX202) },
+ { qcom_board_id(SDM670) },
{ qcom_board_id(SDM450) },
{ qcom_board_id(SM8150) },
{ qcom_board_id(SDA845) },
@@ -349,6 +422,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SDA439) },
{ qcom_board_id(SDA429) },
{ qcom_board_id(SM7150) },
+ { qcom_board_id(SM7150P) },
{ qcom_board_id(IPQ8070) },
{ qcom_board_id(IPQ8071) },
{ qcom_board_id(QM215) },
@@ -359,6 +433,9 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SM6125) },
{ qcom_board_id(IPQ8070A) },
{ qcom_board_id(IPQ8071A) },
+ { qcom_board_id(IPQ8172) },
+ { qcom_board_id(IPQ8173) },
+ { qcom_board_id(IPQ8174) },
{ qcom_board_id(IPQ6018) },
{ qcom_board_id(IPQ6028) },
{ qcom_board_id(SDM429W) },
@@ -385,10 +462,14 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SA8540P) },
{ qcom_board_id(QCM4290) },
{ qcom_board_id(QCS4290) },
+ { qcom_board_id(SM7325) },
{ qcom_board_id_named(SM8450_2, "SM8450") },
{ qcom_board_id_named(SM8450_3, "SM8450") },
{ qcom_board_id(SC7280) },
{ qcom_board_id(SC7180P) },
+ { qcom_board_id(QCM6490) },
+ { qcom_board_id(QCS6490) },
+ { qcom_board_id(SM7325P) },
{ qcom_board_id(IPQ5000) },
{ qcom_board_id(IPQ0509) },
{ qcom_board_id(IPQ0518) },
@@ -403,10 +484,18 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ9510) },
{ qcom_board_id(QRB4210) },
{ qcom_board_id(QRB2210) },
+ { qcom_board_id(SAR2130P) },
+ { qcom_board_id(SM8475) },
+ { qcom_board_id(SM8475P) },
+ { qcom_board_id(SA8255P) },
{ qcom_board_id(SA8775P) },
{ qcom_board_id(QRU1000) },
+ { qcom_board_id(SM8475_2) },
{ qcom_board_id(QDU1000) },
+ { qcom_board_id(X1E80100) },
+ { qcom_board_id(SM8650) },
{ qcom_board_id(SM4450) },
+ { qcom_board_id(SAR1130P) },
{ qcom_board_id(QDU1010) },
{ qcom_board_id(QRU1032) },
{ qcom_board_id(QRU1052) },
@@ -415,7 +504,24 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ5322) },
{ qcom_board_id(IPQ5312) },
{ qcom_board_id(IPQ5302) },
+ { qcom_board_id(QCS8550) },
+ { qcom_board_id(QCM8550) },
+ { qcom_board_id(SM8750) },
{ qcom_board_id(IPQ5300) },
+ { qcom_board_id(SM7635) },
+ { qcom_board_id(SM6650) },
+ { qcom_board_id(SM6650P) },
+ { qcom_board_id(IPQ5321) },
+ { qcom_board_id(IPQ5424) },
+ { qcom_board_id(QCM6690) },
+ { qcom_board_id(QCS6690) },
+ { qcom_board_id(SM8850) },
+ { qcom_board_id(IPQ5404) },
+ { qcom_board_id(QCS9100) },
+ { qcom_board_id(QCS8300) },
+ { qcom_board_id(QCS8275) },
+ { qcom_board_id(QCS9075) },
+ { qcom_board_id(QCS615) },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
@@ -558,7 +664,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
struct smem_image_version *versions;
struct dentry *dentry;
size_t size;
- int i;
+ int i, j;
unsigned int num_pmics;
unsigned int pmic_array_offset;
@@ -570,6 +676,14 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
&qcom_socinfo->info.fmt);
switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 23):
+ case SOCINFO_VERSION(0, 22):
+ case SOCINFO_VERSION(0, 21):
+ case SOCINFO_VERSION(0, 20):
+ qcom_socinfo->info.raw_package_type = __le32_to_cpu(info->raw_package_type);
+ debugfs_create_u32("raw_package_type", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_package_type);
+ fallthrough;
case SOCINFO_VERSION(0, 19):
qcom_socinfo->info.num_func_clusters = __le32_to_cpu(info->num_func_clusters);
qcom_socinfo->info.boot_cluster = __le32_to_cpu(info->boot_cluster);
@@ -702,20 +816,31 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
break;
}
- versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE,
- &size);
-
- for (i = 0; i < ARRAY_SIZE(socinfo_image_names); i++) {
+ for (i = 0, j = 0; i < ARRAY_SIZE(socinfo_image_names); i++, j++) {
if (!socinfo_image_names[i])
continue;
+ if (i == 0) {
+ versions = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+ SMEM_IMAGE_VERSION_TABLE,
+ &size);
+ } else if (i == 32) {
+ versions = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+ SMEM_IMAGE_VERSION_TABLE_2,
+ &size);
+ if (IS_ERR(versions))
+ break;
+
+ j = 0;
+ }
+
dentry = debugfs_create_dir(socinfo_image_names[i],
qcom_socinfo->dbg_root);
- debugfs_create_file("name", 0444, dentry, &versions[i],
+ debugfs_create_file("name", 0444, dentry, &versions[j],
&qcom_image_name_ops);
- debugfs_create_file("variant", 0444, dentry, &versions[i],
+ debugfs_create_file("variant", 0444, dentry, &versions[j],
&qcom_image_variant_ops);
- debugfs_create_file("oem", 0444, dentry, &versions[i],
+ debugfs_create_file("oem", 0444, dentry, &versions[j],
&qcom_image_oem_ops);
}
}
@@ -757,10 +882,16 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
SOCINFO_MAJOR(le32_to_cpu(info->ver)),
SOCINFO_MINOR(le32_to_cpu(info->ver)));
- if (offsetof(struct socinfo, serial_num) <= item_size)
+ if (!qs->attr.soc_id || !qs->attr.revision)
+ return -ENOMEM;
+
+ if (offsetofend(struct socinfo, serial_num) <= item_size) {
qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%u",
le32_to_cpu(info->serial_num));
+ if (!qs->attr.serial_number)
+ return -ENOMEM;
+ }
qs->soc_dev = soc_device_register(&qs->attr);
if (IS_ERR(qs->soc_dev))
@@ -776,15 +907,13 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_socinfo_remove(struct platform_device *pdev)
+static void qcom_socinfo_remove(struct platform_device *pdev)
{
struct qcom_socinfo *qs = platform_get_drvdata(pdev);
soc_device_unregister(qs->soc_dev);
socinfo_debugfs_exit(qs);
-
- return 0;
}
static struct platform_driver qcom_socinfo_driver = {
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index 2f0b1bfe7658..f75659fff287 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -6,20 +6,40 @@
* SAW power controller driver
*/
-#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <linux/regulator/driver.h>
+
#include <soc/qcom/spm.h>
+#define FIELD_SET(current, mask, val) \
+ (((current) & ~(mask)) | FIELD_PREP((mask), (val)))
+
#define SPM_CTL_INDEX 0x7f
#define SPM_CTL_INDEX_SHIFT 4
#define SPM_CTL_EN BIT(0)
+/* These registers might be specific to SPM 1.1 */
+#define SPM_VCTL_VLVL GENMASK(7, 0)
+#define SPM_PMIC_DATA_0_VLVL GENMASK(7, 0)
+#define SPM_PMIC_DATA_1_MIN_VSEL GENMASK(5, 0)
+#define SPM_PMIC_DATA_1_MAX_VSEL GENMASK(21, 16)
+
+#define SPM_1_1_AVS_CTL_AVS_ENABLED BIT(27)
+#define SPM_AVS_CTL_MAX_VLVL GENMASK(22, 17)
+#define SPM_AVS_CTL_MIN_VLVL GENMASK(15, 10)
+
enum spm_reg {
SPM_REG_CFG,
SPM_REG_SPM_CTL,
@@ -29,13 +49,44 @@ enum spm_reg {
SPM_REG_PMIC_DATA_1,
SPM_REG_VCTL,
SPM_REG_SEQ_ENTRY,
- SPM_REG_SPM_STS,
+ SPM_REG_STS0,
+ SPM_REG_STS1,
SPM_REG_PMIC_STS,
SPM_REG_AVS_CTL,
SPM_REG_AVS_LIMIT,
+ SPM_REG_RST,
SPM_REG_NR,
};
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+
+struct spm_reg_data {
+ const u16 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u32 avs_ctl;
+ u32 avs_limit;
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+
+ smp_call_func_t set_vdd;
+ /* for now we support only a single range */
+ struct linear_range *range;
+ unsigned int ramp_delay;
+ unsigned int init_uV;
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+ struct device *dev;
+ unsigned int volt_sel;
+ int reg_cpu;
+};
+
static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = {
[SPM_REG_AVS_CTL] = 0x904,
[SPM_REG_AVS_LIMIT] = 0x908,
@@ -169,6 +220,10 @@ static const struct spm_reg_data spm_reg_8226_cpu = {
static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_CFG] = 0x08,
+ [SPM_REG_STS0] = 0x0c,
+ [SPM_REG_STS1] = 0x10,
+ [SPM_REG_VCTL] = 0x14,
+ [SPM_REG_AVS_CTL] = 0x18,
[SPM_REG_SPM_CTL] = 0x20,
[SPM_REG_PMIC_DLY] = 0x24,
[SPM_REG_PMIC_DATA_0] = 0x28,
@@ -176,7 +231,12 @@ static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_SEQ_ENTRY] = 0x80,
};
+static void smp_set_vdd_v1_1(void *data);
+
/* SPM register data for 8064 */
+static struct linear_range spm_v1_1_regulator_range =
+ REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500);
+
static const struct spm_reg_data spm_reg_8064_cpu = {
.reg_offset = spm_reg_offset_v1_1,
.spm_cfg = 0x1F,
@@ -187,6 +247,10 @@ static const struct spm_reg_data spm_reg_8064_cpu = {
0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
.start_index[PM_SLEEP_MODE_STBY] = 0,
.start_index[PM_SLEEP_MODE_SPC] = 2,
+ .set_vdd = smp_set_vdd_v1_1,
+ .range = &spm_v1_1_regulator_range,
+ .init_uV = 1300000,
+ .ramp_delay = 1250,
};
static inline void spm_register_write(struct spm_driver_data *drv,
@@ -238,6 +302,178 @@ void spm_set_low_power_mode(struct spm_driver_data *drv,
spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
}
+static int spm_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ drv->volt_sel = selector;
+
+ /* Always do the SAW register writes on the corresponding CPU */
+ return smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true);
+}
+
+static int spm_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ return drv->volt_sel;
+}
+
+static const struct regulator_ops spm_reg_ops = {
+ .set_voltage_sel = spm_set_voltage_sel,
+ .get_voltage_sel = spm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static void smp_set_vdd_v1_1(void *data)
+{
+ struct spm_driver_data *drv = data;
+ unsigned int vctl, data0, data1, avs_ctl, sts;
+ unsigned int vlevel, volt_sel;
+ bool avs_enabled;
+
+ volt_sel = drv->volt_sel;
+ vlevel = volt_sel | 0x80; /* band */
+
+ avs_ctl = spm_register_read(drv, SPM_REG_AVS_CTL);
+ vctl = spm_register_read(drv, SPM_REG_VCTL);
+ data0 = spm_register_read(drv, SPM_REG_PMIC_DATA_0);
+ data1 = spm_register_read(drv, SPM_REG_PMIC_DATA_1);
+
+ avs_enabled = avs_ctl & SPM_1_1_AVS_CTL_AVS_ENABLED;
+
+ /* If AVS is enabled, switch it off during the voltage change */
+ if (avs_enabled) {
+ avs_ctl &= ~SPM_1_1_AVS_CTL_AVS_ENABLED;
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+
+ /* Kick the state machine back to idle */
+ spm_register_write(drv, SPM_REG_RST, 1);
+
+ vctl = FIELD_SET(vctl, SPM_VCTL_VLVL, vlevel);
+ data0 = FIELD_SET(data0, SPM_PMIC_DATA_0_VLVL, vlevel);
+ data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MIN_VSEL, volt_sel);
+ data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MAX_VSEL, volt_sel);
+
+ spm_register_write(drv, SPM_REG_VCTL, vctl);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0, data0);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1, data1);
+
+ if (read_poll_timeout_atomic(spm_register_read,
+ sts, sts == vlevel,
+ 1, 200, false,
+ drv, SPM_REG_STS1)) {
+ dev_err_ratelimited(drv->dev, "timeout setting the voltage (%x %x)!\n", sts, vlevel);
+ goto enable_avs;
+ }
+
+ if (avs_enabled) {
+ unsigned int max_avs = volt_sel;
+ unsigned int min_avs = max(max_avs, 4U) - 4;
+
+ avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MIN_VLVL, min_avs);
+ avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MAX_VLVL, max_avs);
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+
+enable_avs:
+ if (avs_enabled) {
+ avs_ctl |= SPM_1_1_AVS_CTL_AVS_ENABLED;
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+}
+
+static int spm_get_cpu(struct device *dev)
+{
+ int cpu;
+ bool found;
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpu_node, *saw_node;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ continue;
+
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ found = (saw_node == dev->of_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+
+ if (found)
+ return cpu;
+ }
+
+	/* L2 SPM is not bound to any CPU; voltage setting is not supported */
+
+ return -EOPNOTSUPP;
+}
+
+static int spm_register_regulator(struct device *dev, struct spm_driver_data *drv)
+{
+ struct regulator_config config = {
+ .dev = dev,
+ .driver_data = drv,
+ };
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+ int ret;
+ bool found;
+
+ if (!drv->reg_data->set_vdd)
+ return 0;
+
+ rdesc = devm_kzalloc(dev, sizeof(*rdesc), GFP_KERNEL);
+ if (!rdesc)
+ return -ENOMEM;
+
+ rdesc->name = "spm";
+ rdesc->of_match = of_match_ptr("regulator");
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->owner = THIS_MODULE;
+ rdesc->ops = &spm_reg_ops;
+
+ rdesc->linear_ranges = drv->reg_data->range;
+ rdesc->n_linear_ranges = 1;
+ rdesc->n_voltages = rdesc->linear_ranges[rdesc->n_linear_ranges - 1].max_sel + 1;
+ rdesc->ramp_delay = drv->reg_data->ramp_delay;
+
+ ret = spm_get_cpu(dev);
+ if (ret < 0)
+ return ret;
+
+ drv->reg_cpu = ret;
+ dev_dbg(dev, "SAW2 bound to CPU %d\n", drv->reg_cpu);
+
+ /*
+ * Program initial voltage, otherwise registration will also try
+ * setting the voltage, which might result in undervolting the CPU.
+ */
+ drv->volt_sel = DIV_ROUND_UP(drv->reg_data->init_uV - rdesc->min_uV,
+ rdesc->uV_step);
+ ret = linear_range_get_selector_high(drv->reg_data->range,
+ drv->reg_data->init_uV,
+ &drv->volt_sel,
+ &found);
+ if (ret) {
+ dev_err(dev, "Initial uV value out of bounds\n");
+ return ret;
+ }
+
+ /* Always do the SAW register writes on the corresponding CPU */
+ smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true);
+
+ rdev = devm_regulator_register(dev, rdesc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
static const struct of_device_id spm_match_table[] = {
{ .compatible = "qcom,sdm660-gold-saw2-v4.1-l2",
.data = &spm_reg_660_gold_l2 },
@@ -288,6 +524,7 @@ static int spm_dev_probe(struct platform_device *pdev)
return -ENODEV;
drv->reg_data = match_id->data;
+ drv->dev = &pdev->dev;
platform_set_drvdata(pdev, drv);
/* Write the SPM sequences first.. */
@@ -315,6 +552,9 @@ static int spm_dev_probe(struct platform_device *pdev)
if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL])
spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+ if (IS_ENABLED(CONFIG_REGULATOR))
+ return spm_register_regulator(&pdev->dev, drv);
+
return 0;
}
@@ -332,4 +572,5 @@ static int __init qcom_spm_init(void)
}
arch_initcall(qcom_spm_init);
+MODULE_DESCRIPTION("Qualcomm Subsystem Power Manager (SPM)");
MODULE_LICENSE("GPL v2");
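A worked sketch of the voltage-to-selector mapping behind the new SAW2 regulator code, using the 8064 range declared above (700000 uV base, 12500 uV step): 1300000 uV resolves to selector 48, since 700000 + 48 * 12500 = 1300000. example_uv_to_sel() is illustrative only:

#include <linux/errno.h>
#include <linux/linear_range.h>
#include <linux/regulator/driver.h>

static struct linear_range example_range =
	REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500);

static int example_uv_to_sel(unsigned int uv, unsigned int *sel)
{
	bool found;
	int ret;

	/* Pick the lowest selector whose voltage is not below the request */
	ret = linear_range_get_selector_high(&example_range, uv, sel, &found);
	if (ret || !found)
		return -EINVAL;

	return 0;
}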
diff --git a/drivers/soc/qcom/trace-aoss.h b/drivers/soc/qcom/trace-aoss.h
new file mode 100644
index 000000000000..fb5b0470c40d
--- /dev/null
+++ b/drivers/soc/qcom/trace-aoss.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qcom_aoss
+
+#if !defined(_TRACE_QCOM_AOSS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_QCOM_AOSS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(aoss_send,
+ TP_PROTO(const char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(msg);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(aoss_send_done,
+ TP_PROTO(const char *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __assign_str(msg);
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: %d", __get_str(msg), __entry->ret)
+);
+
+#endif /* _TRACE_QCOM_AOSS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-aoss
+
+#include <trace/define_trace.h>
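A sketch of how a single compilation unit instantiates and emits these tracepoints; example_send() and its message are placeholders:

/* Exactly one .c file defines CREATE_TRACE_POINTS before this include */
#define CREATE_TRACE_POINTS
#include "trace-aoss.h"

static void example_send(const char *msg, int ret)
{
	trace_aoss_send(msg);
	/* ... hand the message to AOSS message RAM here ... */
	trace_aoss_send_done(msg, ret);
}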
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
index be6b42ecc1f8..593ec1d4e010 100644
--- a/drivers/soc/qcom/trace-rpmh.h
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -26,7 +26,7 @@ TRACE_EVENT(rpmh_tx_done,
),
TP_fast_assign(
- __assign_str(name, d->name);
+ __assign_str(name);
__entry->m = m;
__entry->addr = r->cmds[0].addr;
__entry->data = r->cmds[0].data;
@@ -55,7 +55,7 @@ TRACE_EVENT(rpmh_send_msg,
),
TP_fast_assign(
- __assign_str(name, d->name);
+ __assign_str(name);
__entry->m = m;
__entry->state = state;
__entry->n = n;
diff --git a/drivers/soc/qcom/trace-smp2p.h b/drivers/soc/qcom/trace-smp2p.h
new file mode 100644
index 000000000000..9a6392043f10
--- /dev/null
+++ b/drivers/soc/qcom/trace-smp2p.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qcom_smp2p
+
+#if !defined(__QCOM_SMP2P_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __QCOM_SMP2P_TRACE_H__
+
+#include <linux/device.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(smp2p_ssr_ack,
+ TP_PROTO(const struct device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ ),
+ TP_printk("%s: SSR detected", __get_str(dev_name))
+);
+
+TRACE_EVENT(smp2p_negotiate,
+ TP_PROTO(const struct device *dev, unsigned int features),
+ TP_ARGS(dev, features),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ __field(u32, out_features)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __entry->out_features = features;
+ ),
+ TP_printk("%s: state=open out_features=%s", __get_str(dev_name),
+ __print_flags(__entry->out_features, "|",
+ {SMP2P_FEATURE_SSR_ACK, "SMP2P_FEATURE_SSR_ACK"})
+ )
+);
+
+TRACE_EVENT(smp2p_notify_in,
+ TP_PROTO(struct smp2p_entry *smp2p_entry, unsigned long status, u32 val),
+ TP_ARGS(smp2p_entry, status, val),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(smp2p_entry->smp2p->dev))
+ __string(client_name, smp2p_entry->name)
+ __field(unsigned long, status)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(client_name);
+ __entry->status = status;
+ __entry->val = val;
+ ),
+ TP_printk("%s: %s: status:0x%0lx val:0x%0x",
+ __get_str(dev_name),
+ __get_str(client_name),
+ __entry->status,
+ __entry->val
+ )
+);
+
+TRACE_EVENT(smp2p_update_bits,
+ TP_PROTO(struct smp2p_entry *smp2p_entry, u32 orig, u32 val),
+ TP_ARGS(smp2p_entry, orig, val),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(smp2p_entry->smp2p->dev))
+ __string(client_name, smp2p_entry->name)
+ __field(u32, orig)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(client_name);
+ __entry->orig = orig;
+ __entry->val = val;
+ ),
+ TP_printk("%s: %s: orig:0x%0x new:0x%0x",
+ __get_str(dev_name),
+ __get_str(client_name),
+ __entry->orig,
+ __entry->val
+ )
+);
+
+#endif /* __QCOM_SMP2P_TRACE_H__ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-smp2p
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/qcom/trace_icc-bwmon.h b/drivers/soc/qcom/trace_icc-bwmon.h
new file mode 100644
index 000000000000..beb8e6b485a9
--- /dev/null
+++ b/drivers/soc/qcom/trace_icc-bwmon.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM icc_bwmon
+
+#if !defined(_TRACE_ICC_BWMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ICC_BWMON_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(qcom_bwmon_update,
+ TP_PROTO(const char *name,
+ unsigned int meas_kbps, unsigned int up_kbps, unsigned int down_kbps),
+
+ TP_ARGS(name, meas_kbps, up_kbps, down_kbps),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned int, meas_kbps)
+ __field(unsigned int, up_kbps)
+ __field(unsigned int, down_kbps)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->meas_kbps = meas_kbps;
+ __entry->up_kbps = up_kbps;
+ __entry->down_kbps = down_kbps;
+ ),
+
+ TP_printk("name=%s meas_kbps=%u up_kbps=%u down_kbps=%u",
+ __get_str(name),
+ __entry->meas_kbps,
+ __entry->up_kbps,
+ __entry->down_kbps)
+);
+
+#endif /* _TRACE_ICC_BWMON_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/soc/qcom/
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_icc-bwmon
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/qcom/ubwc_config.c b/drivers/soc/qcom/ubwc_config.c
new file mode 100644
index 000000000000..1c25aaf55e52
--- /dev/null
+++ b/drivers/soc/qcom/ubwc_config.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <linux/soc/qcom/ubwc.h>
+
+static const struct qcom_ubwc_cfg_data no_ubwc_data = {
+ /* no UBWC, no HBB */
+};
+
+static const struct qcom_ubwc_cfg_data kaanapali_data = {
+ .ubwc_enc_version = UBWC_6_0,
+ .ubwc_dec_version = UBWC_6_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data msm8937_data = {
+ .ubwc_enc_version = UBWC_1_0,
+ .ubwc_dec_version = UBWC_1_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL1 |
+ UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data msm8998_data = {
+ .ubwc_enc_version = UBWC_1_0,
+ .ubwc_dec_version = UBWC_1_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL1 |
+ UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 15,
+};
+
+static const struct qcom_ubwc_cfg_data qcm2290_data = {
+ /* no UBWC */
+ .highest_bank_bit = 15,
+};
+
+static const struct qcom_ubwc_cfg_data sa8775p_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 13,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sar2130p_data = {
+ .ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 13,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sc7180_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data sc7280_data = {
+ .ubwc_enc_version = UBWC_3_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 14,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sc8180x_data = {
+ .ubwc_enc_version = UBWC_3_0,
+ .ubwc_dec_version = UBWC_3_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sc8280xp_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sdm670_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data sdm845_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 15,
+};
+
+static const struct qcom_ubwc_cfg_data sm6115_data = {
+ .ubwc_enc_version = UBWC_1_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL1 |
+ UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data sm6125_data = {
+ .ubwc_enc_version = UBWC_1_0,
+ .ubwc_dec_version = UBWC_3_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL1 |
+ UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data sm6150_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data sm6350_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data sm7150_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 14,
+};
+
+static const struct qcom_ubwc_cfg_data sm8150_data = {
+ .ubwc_enc_version = UBWC_3_0,
+ .ubwc_dec_version = UBWC_3_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .highest_bank_bit = 15,
+};
+
+static const struct qcom_ubwc_cfg_data sm8250_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ /* TODO: highest_bank_bit = 15 for LP_DDR4 */
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sm8350_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ /* TODO: highest_bank_bit = 15 for LP_DDR4 */
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sm8550_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ /* TODO: highest_bank_bit = 15 for LP_DDR4 */
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data sm8750_data = {
+ .ubwc_enc_version = UBWC_5_0,
+ .ubwc_dec_version = UBWC_5_0,
+ .ubwc_swizzle = 6,
+ .ubwc_bank_spread = true,
+ /* TODO: highest_bank_bit = 15 for LP_DDR4 */
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data x1e80100_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ /* TODO: highest_bank_bit = 15 for LP_DDR4 */
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct qcom_ubwc_cfg_data glymur_data = {
+ .ubwc_enc_version = UBWC_5_0,
+ .ubwc_dec_version = UBWC_5_0,
+ .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 |
+ UBWC_SWIZZLE_ENABLE_LVL3,
+ .ubwc_bank_spread = true,
+ /* TODO: highest_bank_bit = 15 for LP_DDR4 */
+ .highest_bank_bit = 16,
+ .macrotile_mode = true,
+};
+
+static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
+ { .compatible = "qcom,apq8016", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8026", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8074", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8096", .data = &msm8998_data },
+ { .compatible = "qcom,kaanapali", .data = &kaanapali_data, },
+ { .compatible = "qcom,glymur", .data = &glymur_data},
+ { .compatible = "qcom,msm8226", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8916", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8917", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8937", .data = &msm8937_data },
+ { .compatible = "qcom,msm8929", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8939", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8953", .data = &msm8937_data },
+ { .compatible = "qcom,msm8956", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8974", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8976", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8996", .data = &msm8998_data },
+ { .compatible = "qcom,msm8998", .data = &msm8998_data },
+ { .compatible = "qcom,qcm2290", .data = &qcm2290_data, },
+ { .compatible = "qcom,qcm6490", .data = &sc7280_data, },
+ { .compatible = "qcom,qcs8300", .data = &sc8280xp_data, },
+ { .compatible = "qcom,sa8155p", .data = &sm8150_data, },
+ { .compatible = "qcom,sa8540p", .data = &sc8280xp_data, },
+ { .compatible = "qcom,sa8775p", .data = &sa8775p_data, },
+ { .compatible = "qcom,sar2130p", .data = &sar2130p_data },
+ { .compatible = "qcom,sc7180", .data = &sc7180_data },
+ { .compatible = "qcom,sc7280", .data = &sc7280_data, },
+ { .compatible = "qcom,sc8180x", .data = &sc8180x_data, },
+ { .compatible = "qcom,sc8280xp", .data = &sc8280xp_data, },
+ { .compatible = "qcom,sda660", .data = &msm8937_data },
+ { .compatible = "qcom,sdm450", .data = &msm8937_data },
+ { .compatible = "qcom,sdm630", .data = &msm8937_data },
+ { .compatible = "qcom,sdm632", .data = &msm8937_data },
+ { .compatible = "qcom,sdm636", .data = &msm8937_data },
+ { .compatible = "qcom,sdm660", .data = &msm8937_data },
+ { .compatible = "qcom,sdm670", .data = &sdm670_data, },
+ { .compatible = "qcom,sdm845", .data = &sdm845_data, },
+ { .compatible = "qcom,sm4250", .data = &sm6115_data, },
+ { .compatible = "qcom,sm6115", .data = &sm6115_data, },
+ { .compatible = "qcom,sm6125", .data = &sm6125_data, },
+ { .compatible = "qcom,sm6150", .data = &sm6150_data, },
+ { .compatible = "qcom,sm6350", .data = &sm6350_data, },
+ { .compatible = "qcom,sm6375", .data = &sm6350_data, },
+ { .compatible = "qcom,sm7125", .data = &sc7180_data },
+ { .compatible = "qcom,sm7150", .data = &sm7150_data, },
+ { .compatible = "qcom,sm7225", .data = &sm6350_data, },
+ { .compatible = "qcom,sm7325", .data = &sc7280_data, },
+ { .compatible = "qcom,sm8150", .data = &sm8150_data, },
+ { .compatible = "qcom,sm8250", .data = &sm8250_data, },
+ { .compatible = "qcom,sm8350", .data = &sm8350_data, },
+ { .compatible = "qcom,sm8450", .data = &sm8350_data, },
+ { .compatible = "qcom,sm8550", .data = &sm8550_data, },
+ { .compatible = "qcom,sm8650", .data = &sm8550_data, },
+ { .compatible = "qcom,sm8750", .data = &sm8750_data, },
+ { .compatible = "qcom,x1e80100", .data = &x1e80100_data, },
+ { .compatible = "qcom,x1p42100", .data = &x1e80100_data, },
+ { }
+};
+
+const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void)
+{
+ const struct qcom_ubwc_cfg_data *data;
+
+ data = of_machine_get_match_data(qcom_ubwc_configs);
+ if (!data) {
+ pr_err("Couldn't find UBWC config data for this platform!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(qcom_ubwc_config_get_data);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UBWC config database for QTI SoCs");
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index ad9942412c58..62b424e90d90 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016, Linaro Ltd.
* Copyright (c) 2015, Sony Mobile Communications Inc.
*/
+#include <linux/cleanup.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -198,7 +199,6 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss)
*/
static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
{
- struct wcnss_download_nv_req *req;
const struct firmware *fw;
struct device *dev = wcnss->dev;
const char *nvbin = NVBIN_FILE;
@@ -206,18 +206,19 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
ssize_t left;
int ret;
- req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
+ struct wcnss_download_nv_req *req __free(kfree) = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE,
+ GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin);
if (ret < 0 && ret != -EINVAL)
- goto free_req;
+ return ret;
ret = request_firmware(&fw, nvbin, dev);
if (ret < 0) {
dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret);
- goto free_req;
+ return ret;
}
data = fw->data;
@@ -263,8 +264,6 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
release_fw:
release_firmware(fw);
-free_req:
- kfree(req);
return ret;
}
@@ -287,7 +286,7 @@ struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rp
return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
}
-EXPORT_SYMBOL(qcom_wcnss_open_channel);
+EXPORT_SYMBOL_GPL(qcom_wcnss_open_channel);
static void wcnss_async_probe(struct work_struct *work)
{
@@ -355,7 +354,6 @@ static struct rpmsg_driver wcnss_ctrl_driver = {
.callback = wcnss_ctrl_smd_callback,
.drv = {
.name = "qcom_wcnss_ctrl",
- .owner = THIS_MODULE,
.of_match_table = wcnss_ctrl_of_match,
},
};
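Note: the error-path simplification in wcnss_download_nv() relies on the scope-based cleanup helpers from <linux/cleanup.h>: a pointer declared with __free(kfree) is handed to kfree() automatically when it leaves scope, so early returns no longer need the free_req: label. A self-contained sketch of the pattern; the function name and buffer size are made up for illustration.

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	static int example_scoped_alloc(size_t len)
	{
		u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;	/* kfree(NULL) on scope exit is a no-op */

		buf[0] = 0xa5;		/* do some work with the buffer */

		return 0;		/* buf is kfree()d automatically here */
	}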
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index de31589ed054..340a1ff7e92b 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -24,6 +24,7 @@ config ARCH_RCAR_GEN2
select RENESAS_IRQC
select RST_RCAR
select SYS_SUPPORTS_SH_CMT
+ select SYS_SUPPORTS_SH_TMU
config ARCH_RCAR_GEN3
bool
@@ -34,6 +35,14 @@ config ARCH_RCAR_GEN3
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
+config ARCH_RCAR_GEN4
+ bool
+ select ARCH_RCAR_GEN3
+
+config ARCH_RCAR_GEN5
+ bool
+ select ARCH_RCAR_GEN4
+
config ARCH_RMOBILE
bool
select PM
@@ -60,125 +69,144 @@ if ARM && ARCH_RENESAS
config ARCH_EMEV2
bool "ARM32 Platform support for Emma Mobile EV2"
+ default ARCH_RENESAS
select HAVE_ARM_SCU if SMP
select SYS_SUPPORTS_EM_STI
-config ARCH_R8A7794
- bool "ARM32 Platform support for R-Car E2"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_814220
- select SYSC_R8A7794
-
-config ARCH_R8A7779
- bool "ARM32 Platform support for R-Car H1"
- select ARCH_RCAR_GEN1
+config ARCH_R7S72100
+ bool "ARM32 Platform support for R7S72100 (RZ/A1H)"
+ default ARCH_RENESAS
select ARM_ERRATA_754322
- select ARM_GLOBAL_TIMER
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
- select SYSC_R8A7779
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
+ select SYS_SUPPORTS_SH_MTU2
-config ARCH_R8A7790
- bool "ARM32 Platform support for R-Car H2"
- select ARCH_RCAR_GEN2
+config ARCH_R7S9210
+ bool "ARM32 Platform support for R7S9210 (RZ/A2)"
+ default ARCH_RENESAS
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
+
+config ARCH_R8A73A4
+ bool "ARM32 Platform support for R8A73A4 (R-Mobile APE6)"
+ default ARCH_RENESAS
+ select ARCH_RMOBILE
select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
- select I2C
- select SYSC_R8A7790
+ select HAVE_ARM_ARCH_TIMER
+ select RENESAS_IRQC
-config ARCH_R8A7778
- bool "ARM32 Platform support for R-Car M1A"
- select ARCH_RCAR_GEN1
+config ARCH_R8A7740
+ bool "ARM32 Platform support for R8A7740 (R-Mobile A1)"
+ default ARCH_RENESAS
+ select ARCH_RMOBILE
select ARM_ERRATA_754322
+ select RENESAS_INTC_IRQPIN
-config ARCH_R8A7793
- bool "ARM32 Platform support for R-Car M2-N"
+config ARCH_R8A7742
+ bool "ARM32 Platform support for R8A7742 (RZ/G1H)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select I2C
- select SYSC_R8A7791
+ select ARM_ERRATA_814220
+ select SYSC_R8A7742
-config ARCH_R8A7791
- bool "ARM32 Platform support for R-Car M2-W"
+config ARCH_R8A7743
+ bool "ARM32 Platform support for R8A7743 (RZ/G1M)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select I2C
- select SYSC_R8A7791
+ select SYSC_R8A7743
-config ARCH_R8A7792
- bool "ARM32 Platform support for R-Car V2H"
+config ARCH_R8A7744
+ bool "ARM32 Platform support for R8A7744 (RZ/G1N)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select SYSC_R8A7792
-
-config ARCH_R8A7740
- bool "ARM32 Platform support for R-Mobile A1"
- select ARCH_RMOBILE
- select ARM_ERRATA_754322
- select RENESAS_INTC_IRQPIN
+ select SYSC_R8A7743
-config ARCH_R8A73A4
- bool "ARM32 Platform support for R-Mobile APE6"
- select ARCH_RMOBILE
- select ARM_ERRATA_798181 if SMP
+config ARCH_R8A7745
+ bool "ARM32 Platform support for R8A7745 (RZ/G1E)"
+ default ARCH_RENESAS
+ select ARCH_RCAR_GEN2
select ARM_ERRATA_814220
- select HAVE_ARM_ARCH_TIMER
- select RENESAS_IRQC
-
-config ARCH_R7S72100
- bool "ARM32 Platform support for RZ/A1H"
- select ARM_ERRATA_754322
- select PM
- select PM_GENERIC_DOMAINS
- select RENESAS_OSTM
- select RENESAS_RZA1_IRQC
- select SYS_SUPPORTS_SH_MTU2
-
-config ARCH_R7S9210
- bool "ARM32 Platform support for RZ/A2"
- select PM
- select PM_GENERIC_DOMAINS
- select RENESAS_OSTM
- select RENESAS_RZA1_IRQC
+ select SYSC_R8A7745
config ARCH_R8A77470
- bool "ARM32 Platform support for RZ/G1C"
+ bool "ARM32 Platform support for R8A77470 (RZ/G1C)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_814220
select SYSC_R8A77470
-config ARCH_R8A7745
- bool "ARM32 Platform support for RZ/G1E"
+config ARCH_R8A7778
+ bool "ARM32 Platform support for R8A7778 (R-Car M1A)"
+ default ARCH_RENESAS
+ select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
+
+config ARCH_R8A7779
+ bool "ARM32 Platform support for R8A7779 (R-Car H1)"
+ default ARCH_RENESAS
+ select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select SYSC_R8A7779
+
+config ARCH_R8A7790
+ bool "ARM32 Platform support for R8A7790 (R-Car H2)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
- select SYSC_R8A7745
+ select I2C
+ select SYSC_R8A7790
-config ARCH_R8A7742
- bool "ARM32 Platform support for RZ/G1H"
+config ARCH_R8A7791
+ bool "ARM32 Platform support for R8A7791 (R-Car M2-W)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select ARM_ERRATA_814220
- select SYSC_R8A7742
+ select I2C
+ select SYSC_R8A7791
-config ARCH_R8A7743
- bool "ARM32 Platform support for RZ/G1M"
+config ARCH_R8A7792
+ bool "ARM32 Platform support for R8A7792 (R-Car V2H)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select SYSC_R8A7743
+ select SYSC_R8A7792
-config ARCH_R8A7744
- bool "ARM32 Platform support for RZ/G1N"
+config ARCH_R8A7793
+ bool "ARM32 Platform support for R8A7793 (R-Car M2-N)"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select SYSC_R8A7743
+ select I2C
+ select SYSC_R8A7791
+
+config ARCH_R8A7794
+ bool "ARM32 Platform support for R8A7794 (R-Car E2)"
+ default ARCH_RENESAS
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A7794
config ARCH_R9A06G032
- bool "ARM32 Platform support for RZ/N1D"
+ bool "ARM32 Platform support for R9A06G032 (RZ/N1D)"
+ default ARCH_RENESAS
select ARCH_RZN1
select ARM_ERRATA_814220
config ARCH_SH73A0
- bool "ARM32 Platform support for SH-Mobile AG5"
+ bool "ARM32 Platform support for SH73A0 (SH-Mobile AG5)"
+ default ARCH_RENESAS
select ARCH_RMOBILE
select ARM_ERRATA_754322
select ARM_GLOBAL_TIMER
@@ -190,24 +218,41 @@ endif # ARM
if ARM64
-config ARCH_R8A77995
- bool "ARM64 Platform support for R-Car D3"
+config ARCH_R8A774A1
+ bool "ARM64 Platform support for R8A774A1 (RZ/G2M)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
- select SYSC_R8A77995
+ select SYSC_R8A774A1
help
- This enables support for the Renesas R-Car D3 SoC.
- This includes different gradings like R-Car D3e.
+ This enables support for the Renesas RZ/G2M SoC.
-config ARCH_R8A77990
- bool "ARM64 Platform support for R-Car E3"
+config ARCH_R8A774B1
+ bool "ARM64 Platform support for R8A774B1 (RZ/G2N)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
- select SYSC_R8A77990
+ select SYSC_R8A774B1
help
- This enables support for the Renesas R-Car E3 SoC.
- This includes different gradings like R-Car E3e.
+ This enables support for the Renesas RZ/G2N SoC.
+
+config ARCH_R8A774C0
+ bool "ARM64 Platform support for R8A774C0 (RZ/G2E)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774C0
+ help
+ This enables support for the Renesas RZ/G2E SoC.
+
+config ARCH_R8A774E1
+ bool "ARM64 Platform support for R8A774E1 (RZ/G2H)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774E1
+ help
+ This enables support for the Renesas RZ/G2H SoC.
config ARCH_R8A77951
- bool "ARM64 Platform support for R-Car H3 ES2.0+"
+ bool "ARM64 Platform support for R8A77951 (R-Car H3 ES2.0+)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
@@ -215,125 +260,194 @@ config ARCH_R8A77951
later).
This includes different gradings like R-Car H3e, H3e-2G, and H3Ne.
-config ARCH_R8A77965
- bool "ARM64 Platform support for R-Car M3-N"
- select ARCH_RCAR_GEN3
- select SYSC_R8A77965
- help
- This enables support for the Renesas R-Car M3-N SoC.
- This includes different gradings like R-Car M3Ne and M3Ne-2G.
-
config ARCH_R8A77960
- bool "ARM64 Platform support for R-Car M3-W"
+ bool "ARM64 Platform support for R8A77960 (R-Car M3-W)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77960
help
This enables support for the Renesas R-Car M3-W SoC.
config ARCH_R8A77961
- bool "ARM64 Platform support for R-Car M3-W+"
+ bool "ARM64 Platform support for R8A77961 (R-Car M3-W+)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77961
help
This enables support for the Renesas R-Car M3-W+ SoC.
This includes different gradings like R-Car M3e and M3e-2G.
-config ARCH_R8A779F0
- bool "ARM64 Platform support for R-Car S4-8"
+config ARCH_R8A77965
+ bool "ARM64 Platform support for R8A77965 (R-Car M3-N)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
- select SYSC_R8A779F0
+ select SYSC_R8A77965
help
- This enables support for the Renesas R-Car S4-8 SoC.
+ This enables support for the Renesas R-Car M3-N SoC.
+ This includes different gradings like R-Car M3Ne and M3Ne-2G.
+
+config ARCH_R8A77970
+ bool "ARM64 Platform support for R8A77970 (R-Car V3M)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77970
+ help
+ This enables support for the Renesas R-Car V3M SoC.
config ARCH_R8A77980
- bool "ARM64 Platform support for R-Car V3H"
+ bool "ARM64 Platform support for R8A77980 (R-Car V3H)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77980
help
This enables support for the Renesas R-Car V3H SoC.
-config ARCH_R8A77970
- bool "ARM64 Platform support for R-Car V3M"
+config ARCH_R8A77990
+ bool "ARM64 Platform support for R8A77990 (R-Car E3)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
- select SYSC_R8A77970
+ select SYSC_R8A77990
help
- This enables support for the Renesas R-Car V3M SoC.
+ This enables support for the Renesas R-Car E3 SoC.
+ This includes different gradings like R-Car E3e.
-config ARCH_R8A779A0
- bool "ARM64 Platform support for R-Car V3U"
+config ARCH_R8A77995
+ bool "ARM64 Platform support for R8A77995 (R-Car D3)"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
+ select SYSC_R8A77995
+ help
+ This enables support for the Renesas R-Car D3 SoC.
+ This includes different gradings like R-Car D3e.
+
+config ARCH_R8A779A0
+ bool "ARM64 Platform support for R8A779A0 (R-Car V3U)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN4
select SYSC_R8A779A0
help
This enables support for the Renesas R-Car V3U SoC.
+config ARCH_R8A779F0
+ bool "ARM64 Platform support for R8A779F0 (R-Car S4-8)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN4
+ select SYSC_R8A779F0
+ help
+ This enables support for the Renesas R-Car S4-8 SoC.
+
config ARCH_R8A779G0
- bool "ARM64 Platform support for R-Car V4H"
- select ARCH_RCAR_GEN3
+ bool "ARM64 Platform support for R8A779G0 (R-Car V4H)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN4
select SYSC_R8A779G0
help
This enables support for the Renesas R-Car V4H SoC.
-config ARCH_R8A774C0
- bool "ARM64 Platform support for RZ/G2E"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774C0
- help
- This enables support for the Renesas RZ/G2E SoC.
-
-config ARCH_R8A774E1
- bool "ARM64 Platform support for RZ/G2H"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774E1
- help
- This enables support for the Renesas RZ/G2H SoC.
-
-config ARCH_R8A774A1
- bool "ARM64 Platform support for RZ/G2M"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774A1
+config ARCH_R8A779H0
+ bool "ARM64 Platform support for R8A779H0 (R-Car V4M)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN4
+ select SYSC_R8A779H0
help
- This enables support for the Renesas RZ/G2M SoC.
+ This enables support for the Renesas R-Car V4M SoC.
-config ARCH_R8A774B1
- bool "ARM64 Platform support for RZ/G2N"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774B1
+config ARCH_R8A78000
+ bool "ARM64 Platform support for R8A78000 (R-Car X5H)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN5
help
- This enables support for the Renesas RZ/G2N SoC.
+ This enables support for the Renesas R-Car X5H SoC.
config ARCH_R9A07G043
- bool "ARM64 Platform support for RZ/G2UL"
+ bool "ARM64 Platform support for R9A07G043U (RZ/G2UL)"
+ default y if ARCH_RENESAS
select ARCH_RZG2L
help
This enables support for the Renesas RZ/G2UL SoC variants.
config ARCH_R9A07G044
- bool "ARM64 Platform support for RZ/G2L"
+ bool "ARM64 Platform support for R9A07G044 (RZ/G2L)"
+ default y if ARCH_RENESAS
select ARCH_RZG2L
help
This enables support for the Renesas RZ/G2L SoC variants.
config ARCH_R9A07G054
- bool "ARM64 Platform support for RZ/V2L"
+ bool "ARM64 Platform support for R9A07G054 (RZ/V2L)"
+ default y if ARCH_RENESAS
select ARCH_RZG2L
help
This enables support for the Renesas RZ/V2L SoC variants.
+config ARCH_R9A08G045
+ bool "ARM64 Platform support for R9A08G045 (RZ/G3S)"
+ default y if ARCH_RENESAS
+ select ARCH_RZG2L
+ select SYSC_R9A08G045
+ help
+ This enables support for the Renesas RZ/G3S SoC variants.
+
config ARCH_R9A09G011
- bool "ARM64 Platform support for RZ/V2M"
+ bool "ARM64 Platform support for R9A09G011 (RZ/V2M)"
+ default y if ARCH_RENESAS
select PM
select PM_GENERIC_DOMAINS
select PWC_RZV2M
help
This enables support for the Renesas RZ/V2M SoC.
+config ARCH_R9A09G047
+ bool "ARM64 Platform support for R9A09G047 (RZ/G3E)"
+ default y if ARCH_RENESAS
+ select SYS_R9A09G047
+ help
+ This enables support for the Renesas RZ/G3E SoC variants.
+
+config ARCH_R9A09G056
+ bool "ARM64 Platform support for R9A09G056 (RZ/V2N)"
+ default y if ARCH_RENESAS
+ select SYS_R9A09G056
+ help
+ This enables support for the Renesas RZ/V2N SoC variants.
+
+config ARCH_R9A09G057
+ bool "ARM64 Platform support for R9A09G057 (RZ/V2H(P))"
+ default y if ARCH_RENESAS
+ select RENESAS_RZV2H_ICU
+ select SYS_R9A09G057
+ help
+ This enables support for the Renesas RZ/V2H(P) SoC variants.
+
+config ARCH_R9A09G077
+ bool "ARM64 Platform support for R9A09G077 (RZ/T2H)"
+ default y if ARCH_RENESAS
+ help
+ This enables support for the Renesas RZ/T2H SoC variants.
+
+config ARCH_R9A09G087
+ bool "ARM64 Platform support for R9A09G087 (RZ/N2H)"
+ default y if ARCH_RENESAS
+ help
+ This enables support for the Renesas RZ/N2H SoC variants.
+
endif # ARM64
if RISCV
config ARCH_R9A07G043
- bool "RISC-V Platform support for RZ/Five"
+ bool "RISC-V Platform support for R9A07G043F (RZ/Five)"
+ depends on NONPORTABLE
+ depends on !DMA_DIRECT_REMAP
+ depends on RISCV_ALTERNATIVE
+ depends on !RISCV_ISA_ZICBOM
+ depends on RISCV_SBI
select ARCH_RZG2L
+ select AX45MP_L2_CACHE
+ select DMA_GLOBAL_POOL
+ select ERRATA_ANDES
+ select ERRATA_ANDES_CMO
help
This enables support for the Renesas RZ/Five SoC.
@@ -345,109 +459,24 @@ config PWC_RZV2M
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
-config SYSC_RCAR
- bool "System Controller support for R-Car" if COMPILE_TEST
-
-config SYSC_RCAR_GEN4
- bool "System Controller support for R-Car Gen4" if COMPILE_TEST
-
-config SYSC_R8A77995
- bool "System Controller support for R-Car D3" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7794
- bool "System Controller support for R-Car E2" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77990
- bool "System Controller support for R-Car E3" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7779
- bool "System Controller support for R-Car H1" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7790
- bool "System Controller support for R-Car H2" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7795
- bool "System Controller support for R-Car H3" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7791
- bool "System Controller support for R-Car M2-W/N" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77965
- bool "System Controller support for R-Car M3-N" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77960
- bool "System Controller support for R-Car M3-W" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77961
- bool "System Controller support for R-Car M3-W+" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A779F0
- bool "System Controller support for R-Car S4-8" if COMPILE_TEST
- select SYSC_RCAR_GEN4
-
-config SYSC_R8A7792
- bool "System Controller support for R-Car V2H" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77980
- bool "System Controller support for R-Car V3H" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77970
- bool "System Controller support for R-Car V3M" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A779A0
- bool "System Controller support for R-Car V3U" if COMPILE_TEST
- select SYSC_RCAR_GEN4
-
-config SYSC_R8A779G0
- bool "System Controller support for R-Car V4H" if COMPILE_TEST
- select SYSC_RCAR_GEN4
-
-config SYSC_RMOBILE
- bool "System Controller support for R-Mobile" if COMPILE_TEST
-
-config SYSC_R8A77470
- bool "System Controller support for RZ/G1C" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7745
- bool "System Controller support for RZ/G1E" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7742
- bool "System Controller support for RZ/G1H" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7743
- bool "System Controller support for RZ/G1M" if COMPILE_TEST
- select SYSC_RCAR
+config SYSC_RZ
+ bool "System controller for RZ SoCs" if COMPILE_TEST
+ select MFD_SYSCON
-config SYSC_R8A774C0
- bool "System Controller support for RZ/G2E" if COMPILE_TEST
- select SYSC_RCAR
+config SYSC_R9A08G045
+ bool "Renesas System controller support for R9A08G045 (RZ/G3S)" if COMPILE_TEST
+ select SYSC_RZ
-config SYSC_R8A774E1
- bool "System Controller support for RZ/G2H" if COMPILE_TEST
- select SYSC_RCAR
+config SYS_R9A09G047
+ bool "Renesas System controller support for R9A09G047 (RZ/G3E)" if COMPILE_TEST
+ select SYSC_RZ
-config SYSC_R8A774A1
- bool "System Controller support for RZ/G2M" if COMPILE_TEST
- select SYSC_RCAR
+config SYS_R9A09G056
+ bool "Renesas System controller support for R9A09G056 (RZ/V2N)" if COMPILE_TEST
+ select SYSC_RZ
-config SYSC_R8A774B1
- bool "System Controller support for RZ/G2N" if COMPILE_TEST
- select SYSC_RCAR
+config SYS_R9A09G057
+ bool "Renesas System controller support for R9A09G057 (RZ/V2H)" if COMPILE_TEST
+ select SYSC_RZ
endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 734f8f8cefa4..3bdcc6a395d5 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -6,7 +6,12 @@ obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
+obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o
+obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o
+obj-$(CONFIG_SYS_R9A09G056) += r9a09g056-sys.o
+obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o
# Family
obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
+obj-$(CONFIG_SYSC_RZ) += rz-sysc.o
diff --git a/drivers/soc/renesas/pwc-rzv2m.c b/drivers/soc/renesas/pwc-rzv2m.c
index 452cee8d68be..6209168b3734 100644
--- a/drivers/soc/renesas/pwc-rzv2m.c
+++ b/drivers/soc/renesas/pwc-rzv2m.c
@@ -24,8 +24,8 @@ struct rzv2m_pwc_priv {
DECLARE_BITMAP(ch_en_bits, 2);
};
-static void rzv2m_pwc_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int rzv2m_pwc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct rzv2m_pwc_priv *priv = gpiochip_get_data(chip);
u32 reg;
@@ -38,6 +38,8 @@ static void rzv2m_pwc_gpio_set(struct gpio_chip *chip, unsigned int offset,
writel(reg, priv->base + PWC_GPIO);
assign_bit(offset, priv->ch_en_bits, value);
+
+ return 0;
}
static int rzv2m_pwc_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/soc/renesas/r9a08g045-sysc.c b/drivers/soc/renesas/r9a08g045-sysc.c
new file mode 100644
index 000000000000..03d653d5cde5
--- /dev/null
+++ b/drivers/soc/renesas/r9a08g045-sysc.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3S System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+
+#include "rz-sysc.h"
+
+#define SYS_XSPI_MAP_STAADD_CS0 0x348
+#define SYS_XSPI_MAP_ENDADD_CS0 0x34c
+#define SYS_XSPI_MAP_STAADD_CS1 0x350
+#define SYS_XSPI_MAP_ENDADD_CS1 0x354
+#define SYS_GETH0_CFG 0x380
+#define SYS_GETH1_CFG 0x390
+#define SYS_PCIE_CFG 0x3a0
+#define SYS_PCIE_MON 0x3a4
+#define SYS_PCIE_ERR_MON 0x3ac
+#define SYS_PCIE_PHY 0x3b4
+#define SYS_I2C0_CFG 0x400
+#define SYS_I2C1_CFG 0x410
+#define SYS_I2C2_CFG 0x420
+#define SYS_I2C3_CFG 0x430
+#define SYS_I3C_CFG 0x440
+#define SYS_USB_PWRRDY 0xd70
+#define SYS_PCIE_RST_RSM_B 0xd74
+
+static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initconst = {
+ .family = "RZ/G3S",
+ .id = 0x85e0447,
+ .devid_offset = 0xa04,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+};
+
+static bool rzg3s_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_XSPI_MAP_STAADD_CS0:
+ case SYS_XSPI_MAP_ENDADD_CS0:
+ case SYS_XSPI_MAP_STAADD_CS1:
+ case SYS_XSPI_MAP_ENDADD_CS1:
+ case SYS_GETH0_CFG:
+ case SYS_GETH1_CFG:
+ case SYS_PCIE_CFG:
+ case SYS_PCIE_MON:
+ case SYS_PCIE_ERR_MON:
+ case SYS_PCIE_PHY:
+ case SYS_I2C0_CFG:
+ case SYS_I2C1_CFG:
+ case SYS_I2C2_CFG:
+ case SYS_I2C3_CFG:
+ case SYS_I3C_CFG:
+ case SYS_USB_PWRRDY:
+ case SYS_PCIE_RST_RSM_B:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzg3s_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_XSPI_MAP_STAADD_CS0:
+ case SYS_XSPI_MAP_ENDADD_CS0:
+ case SYS_XSPI_MAP_STAADD_CS1:
+ case SYS_XSPI_MAP_ENDADD_CS1:
+ case SYS_PCIE_CFG:
+ case SYS_PCIE_PHY:
+ case SYS_I2C0_CFG:
+ case SYS_I2C1_CFG:
+ case SYS_I2C2_CFG:
+ case SYS_I2C3_CFG:
+ case SYS_I3C_CFG:
+ case SYS_USB_PWRRDY:
+ case SYS_PCIE_RST_RSM_B:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
+ .soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
+ .readable_reg = rzg3s_regmap_readable_reg,
+ .writeable_reg = rzg3s_regmap_writeable_reg,
+ .max_register = 0xe20,
+};
diff --git a/drivers/soc/renesas/r9a09g047-sys.c b/drivers/soc/renesas/r9a09g047-sys.c
new file mode 100644
index 000000000000..e413b0eff9bf
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g047-sys.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3E System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHz
+ * [0,1] => 1.5GHz
+ * [1,0] => 1.6GHz
+ * [1,1] => 1.7GHz
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_CA55_DIS BIT(8)
+#define SYS_LSI_PRR_NPU_DIS BIT(1)
+
+#define SYS_LSI_OTPTSU1TRMVAL0 0x330
+#define SYS_LSI_OTPTSU1TRMVAL1 0x334
+#define SYS_SPI_STAADDCS0 0x900
+#define SYS_SPI_ENDADDCS0 0x904
+#define SYS_SPI_STAADDCS1 0x908
+#define SYS_SPI_ENDADDCS1 0x90c
+#define SYS_VSP_CLK 0xe00
+#define SYS_GBETH0_CFG 0xf00
+#define SYS_GBETH1_CFG 0xf04
+#define SYS_PCIE_INTX_CH0 0x1000
+#define SYS_PCIE_MSI1_CH0 0x1004
+#define SYS_PCIE_MSI2_CH0 0x1008
+#define SYS_PCIE_MSI3_CH0 0x100c
+#define SYS_PCIE_MSI4_CH0 0x1010
+#define SYS_PCIE_MSI5_CH0 0x1014
+#define SYS_PCIE_PME_CH0 0x1018
+#define SYS_PCIE_ACK_CH0 0x101c
+#define SYS_PCIE_MISC_CH0 0x1020
+#define SYS_PCIE_MODE_CH0 0x1024
+#define SYS_ADC_CFG 0x1600
+
+static void rzg3e_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool is_quad_core, npu_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check CPU and NPU configuration */
+ is_quad_core = !(prr_val & SYS_LSI_PRR_CA55_DIS);
+ npu_enabled = !(prr_val & SYS_LSI_PRR_NPU_DIS);
+
+ dev_info(dev, "Detected Renesas %s Core %s %s Rev %s%s\n",
+ is_quad_core ? "Quad" : "Dual", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision,
+ npu_enabled ? " with Ethos-U55" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initconst = {
+ .family = "RZ/G3E",
+ .id = 0x8679447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzg3e_sys_print_id,
+};
+
+static bool rzg3e_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_LSI_OTPTSU1TRMVAL0:
+ case SYS_LSI_OTPTSU1TRMVAL1:
+ case SYS_SPI_STAADDCS0:
+ case SYS_SPI_ENDADDCS0:
+ case SYS_SPI_STAADDCS1:
+ case SYS_SPI_ENDADDCS1:
+ case SYS_VSP_CLK:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzg3e_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_SPI_STAADDCS0:
+ case SYS_SPI_ENDADDCS0:
+ case SYS_SPI_STAADDCS1:
+ case SYS_SPI_ENDADDCS1:
+ case SYS_VSP_CLK:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct rz_sysc_init_data rzg3e_sys_init_data = {
+ .soc_id_init_data = &rzg3e_sys_soc_id_init_data,
+ .readable_reg = rzg3e_regmap_readable_reg,
+ .writeable_reg = rzg3e_regmap_writeable_reg,
+ .max_register = 0x170c,
+};
diff --git a/drivers/soc/renesas/r9a09g056-sys.c b/drivers/soc/renesas/r9a09g056-sys.c
new file mode 100644
index 000000000000..42f5eff291fd
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g056-sys.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2N System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+#define SYS_LSI_MODE_SEC_EN BIT(16)
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHz
+ * [0,1] => 1.5GHz
+ * [1,0] => 1.6GHz
+ * [1,1] => 1.7GHz
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_GPU_DIS BIT(0)
+#define SYS_LSI_PRR_ISP_DIS BIT(4)
+
+#define SYS_RZV2N_FEATURE_G31 BIT(0)
+#define SYS_RZV2N_FEATURE_C55 BIT(1)
+#define SYS_RZV2N_FEATURE_SEC BIT(2)
+
+#define SYS_LSI_OTPTSU0TRMVAL0 0x320
+#define SYS_LSI_OTPTSU0TRMVAL1 0x324
+#define SYS_LSI_OTPTSU1TRMVAL0 0x330
+#define SYS_LSI_OTPTSU1TRMVAL1 0x334
+#define SYS_GBETH0_CFG 0xf00
+#define SYS_GBETH1_CFG 0xf04
+#define SYS_PCIE_INTX_CH0 0x1000
+#define SYS_PCIE_MSI1_CH0 0x1004
+#define SYS_PCIE_MSI2_CH0 0x1008
+#define SYS_PCIE_MSI3_CH0 0x100c
+#define SYS_PCIE_MSI4_CH0 0x1010
+#define SYS_PCIE_MSI5_CH0 0x1014
+#define SYS_PCIE_PME_CH0 0x1018
+#define SYS_PCIE_ACK_CH0 0x101c
+#define SYS_PCIE_MISC_CH0 0x1020
+#define SYS_PCIE_MODE_CH0 0x1024
+#define SYS_ADC_CFG 0x1600
+
+static void rzv2n_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ u32 prr_val, mode_val;
+ u8 feature_flags;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check GPU, ISP and Cryptographic configuration */
+ feature_flags = !(prr_val & SYS_LSI_PRR_GPU_DIS) ? SYS_RZV2N_FEATURE_G31 : 0;
+ feature_flags |= !(prr_val & SYS_LSI_PRR_ISP_DIS) ? SYS_RZV2N_FEATURE_C55 : 0;
+ feature_flags |= (mode_val & SYS_LSI_MODE_SEC_EN) ? SYS_RZV2N_FEATURE_SEC : 0;
+
+ dev_info(dev, "Detected Renesas %s %sn%d Rev %s%s%s%s%s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, 41 + feature_flags, soc_dev_attr->revision,
+ feature_flags ? " with" : "",
+ feature_flags & SYS_RZV2N_FEATURE_G31 ? " GE3D (Mali-G31)" : "",
+ feature_flags & SYS_RZV2N_FEATURE_SEC ? " Cryptographic engine" : "",
+ feature_flags & SYS_RZV2N_FEATURE_C55 ? " ISP (Mali-C55)" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzv2n_sys_soc_id_init_data __initconst = {
+ .family = "RZ/V2N",
+ .id = 0x867d447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzv2n_sys_print_id,
+};
+
+static bool rzv2n_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_LSI_OTPTSU0TRMVAL0:
+ case SYS_LSI_OTPTSU0TRMVAL1:
+ case SYS_LSI_OTPTSU1TRMVAL0:
+ case SYS_LSI_OTPTSU1TRMVAL1:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzv2n_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct rz_sysc_init_data rzv2n_sys_init_data = {
+ .soc_id_init_data = &rzv2n_sys_soc_id_init_data,
+ .readable_reg = rzv2n_regmap_readable_reg,
+ .writeable_reg = rzv2n_regmap_writeable_reg,
+ .max_register = 0x170c,
+};
diff --git a/drivers/soc/renesas/r9a09g057-sys.c b/drivers/soc/renesas/r9a09g057-sys.c
new file mode 100644
index 000000000000..827c718ac7c5
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g057-sys.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2H System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHz
+ * [0,1] => 1.5GHz
+ * [1,0] => 1.6GHz
+ * [1,1] => 1.7GHz
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_GPU_DIS BIT(0)
+#define SYS_LSI_PRR_ISP_DIS BIT(4)
+
+#define SYS_LSI_OTPTSU0TRMVAL0 0x320
+#define SYS_LSI_OTPTSU0TRMVAL1 0x324
+#define SYS_LSI_OTPTSU1TRMVAL0 0x330
+#define SYS_LSI_OTPTSU1TRMVAL1 0x334
+#define SYS_GBETH0_CFG 0xf00
+#define SYS_GBETH1_CFG 0xf04
+#define SYS_PCIE_INTX_CH0 0x1000
+#define SYS_PCIE_MSI1_CH0 0x1004
+#define SYS_PCIE_MSI2_CH0 0x1008
+#define SYS_PCIE_MSI3_CH0 0x100c
+#define SYS_PCIE_MSI4_CH0 0x1010
+#define SYS_PCIE_MSI5_CH0 0x1014
+#define SYS_PCIE_PME_CH0 0x1018
+#define SYS_PCIE_ACK_CH0 0x101c
+#define SYS_PCIE_MISC_CH0 0x1020
+#define SYS_PCIE_MODE_CH0 0x1024
+#define SYS_PCIE_INTX_CH1 0x1030
+#define SYS_PCIE_MSI1_CH1 0x1034
+#define SYS_PCIE_MSI2_CH1 0x1038
+#define SYS_PCIE_MSI3_CH1 0x103c
+#define SYS_PCIE_MSI4_CH1 0x1040
+#define SYS_PCIE_MSI5_CH1 0x1044
+#define SYS_PCIE_PME_CH1 0x1048
+#define SYS_PCIE_ACK_CH1 0x104c
+#define SYS_PCIE_MISC_CH1 0x1050
+#define SYS_PCIE_MODE_CH1 0x1054
+#define SYS_PCIE_MODE 0x1060
+#define SYS_ADC_CFG 0x1600
+
+static void rzv2h_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool gpu_enabled, isp_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check GPU and ISP configuration */
+ gpu_enabled = !(prr_val & SYS_LSI_PRR_GPU_DIS);
+ isp_enabled = !(prr_val & SYS_LSI_PRR_ISP_DIS);
+
+ dev_info(dev, "Detected Renesas %s %s Rev %s%s%s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision,
+ gpu_enabled ? " with GE3D (Mali-G31)" : "",
+ isp_enabled ? " with ISP (Mali-C55)" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initconst = {
+ .family = "RZ/V2H",
+ .id = 0x847a447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzv2h_sys_print_id,
+};
+
+static bool rzv2h_regmap_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_LSI_OTPTSU0TRMVAL0:
+ case SYS_LSI_OTPTSU0TRMVAL1:
+ case SYS_LSI_OTPTSU1TRMVAL0:
+ case SYS_LSI_OTPTSU1TRMVAL1:
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_PCIE_INTX_CH1:
+ case SYS_PCIE_MSI1_CH1:
+ case SYS_PCIE_MSI2_CH1:
+ case SYS_PCIE_MSI3_CH1:
+ case SYS_PCIE_MSI4_CH1:
+ case SYS_PCIE_MSI5_CH1:
+ case SYS_PCIE_PME_CH1:
+ case SYS_PCIE_ACK_CH1:
+ case SYS_PCIE_MISC_CH1:
+ case SYS_PCIE_MODE_CH1:
+ case SYS_PCIE_MODE:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rzv2h_regmap_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SYS_GBETH0_CFG:
+ case SYS_GBETH1_CFG:
+ case SYS_PCIE_INTX_CH0:
+ case SYS_PCIE_MSI1_CH0:
+ case SYS_PCIE_MSI2_CH0:
+ case SYS_PCIE_MSI3_CH0:
+ case SYS_PCIE_MSI4_CH0:
+ case SYS_PCIE_MSI5_CH0:
+ case SYS_PCIE_PME_CH0:
+ case SYS_PCIE_ACK_CH0:
+ case SYS_PCIE_MISC_CH0:
+ case SYS_PCIE_MODE_CH0:
+ case SYS_PCIE_INTX_CH1:
+ case SYS_PCIE_MSI1_CH1:
+ case SYS_PCIE_MSI2_CH1:
+ case SYS_PCIE_MSI3_CH1:
+ case SYS_PCIE_MSI4_CH1:
+ case SYS_PCIE_MSI5_CH1:
+ case SYS_PCIE_PME_CH1:
+ case SYS_PCIE_ACK_CH1:
+ case SYS_PCIE_MISC_CH1:
+ case SYS_PCIE_MODE_CH1:
+ case SYS_PCIE_MODE:
+ case SYS_ADC_CFG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct rz_sysc_init_data rzv2h_sys_init_data = {
+ .soc_id_init_data = &rzv2h_sys_soc_id_init_data,
+ .readable_reg = rzv2h_regmap_readable_reg,
+ .writeable_reg = rzv2h_regmap_writeable_reg,
+ .max_register = 0x170c,
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 98fd97da6cd4..0541990901fc 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -12,6 +12,7 @@
#define WDTRSTCR_RESET 0xA55A0002
#define WDTRSTCR 0x0054
+#define GEN4_WDTRSTCR_RESET 0xA55A8002
#define GEN4_WDTRSTCR 0x0010
#define CR7BAR 0x0070
@@ -30,7 +31,7 @@ static int rcar_rst_enable_wdt_reset(void __iomem *base)
static int rcar_rst_v3u_enable_wdt_reset(void __iomem *base)
{
- iowrite32(WDTRSTCR_RESET, base + GEN4_WDTRSTCR);
+ iowrite32(GEN4_WDTRSTCR_RESET, base + GEN4_WDTRSTCR);
return 0;
}
@@ -117,6 +118,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_v3u },
{ .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
{ .compatible = "renesas,r8a779g0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779h0-rst", .data = &rcar_rst_gen4 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 42af7c09f743..ee4f17bb4db4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2014-2016 Glider bvba
*/
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -12,7 +13,6 @@
#include <linux/string.h>
#include <linux/sys_soc.h>
-
struct renesas_family {
const char name[16];
u32 reg; /* CCCR or PRR, if not in DT */
@@ -37,6 +37,10 @@ static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = {
.name = "R-Car Gen4",
};
+static const struct renesas_family fam_rcar_gen5 __initconst __maybe_unused = {
+ .name = "R-Car Gen5",
+};
+
static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
.name = "R-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@@ -85,7 +89,6 @@ static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
};
-
struct renesas_soc {
const struct renesas_family *family;
u32 id;
@@ -263,12 +266,21 @@ static const struct renesas_soc soc_rcar_v4h __initconst __maybe_unused = {
.id = 0x5c,
};
+static const struct renesas_soc soc_rcar_v4m __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5d,
+};
+
+static const struct renesas_soc soc_rcar_x5h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen5,
+ .id = 0x60,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
};
-
static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R7S72100
{ .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
@@ -373,6 +385,12 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R8A779G0
{ .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
#endif
+#ifdef CONFIG_ARCH_R8A779H0
+ { .compatible = "renesas,r8a779h0", .data = &soc_rcar_v4m },
+#endif
+#ifdef CONFIG_ARCH_R8A78000
+ { .compatible = "renesas,r8a78000", .data = &soc_rcar_x5h },
+#endif
#ifdef CONFIG_ARCH_R9A07G043
#ifdef CONFIG_RISCV
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
@@ -429,6 +447,7 @@ static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,r9a07g043-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
+ { .compatible = "renesas,r9a08g045-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a09g011-sys", .data = &id_rzv2m },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
@@ -475,10 +494,6 @@ static int __init renesas_soc_init(void)
return -ENOMEM;
}
- np = of_find_node_by_path("/");
- of_property_read_string(np, "model", &soc_dev_attr->machine);
- of_node_put(np);
-
soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
@@ -510,8 +525,7 @@ static int __init renesas_soc_init(void)
eshi, eslo);
}
- if (soc->id &&
- ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
+ if (soc->id && field_get(id->mask, product) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
ret = -ENODEV;
goto free_soc_dev_attr;
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
new file mode 100644
index 000000000000..ae727d9c8cc5
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include "rz-sysc.h"
+
+/**
+ * struct rz_sysc - RZ SYSC private data structure
+ * @base: SYSC base address
+ * @dev: SYSC device pointer
+ */
+struct rz_sysc {
+ void __iomem *base;
+ struct device *dev;
+};
+
+static int rz_sysc_soc_init(struct rz_sysc *sysc, const struct of_device_id *match)
+{
+ const struct rz_sysc_init_data *sysc_data = match->data;
+ const struct rz_sysc_soc_id_init_data *soc_data = sysc_data->soc_id_init_data;
+ struct soc_device_attribute *soc_dev_attr;
+ const char *soc_id_start, *soc_id_end;
+ u32 val, revision, specific_id;
+ struct soc_device *soc_dev;
+ char soc_id[32] = {0};
+ size_t size;
+
+ soc_id_start = strchr(match->compatible, ',') + 1;
+ soc_id_end = strchr(match->compatible, '-');
+ size = soc_id_end - soc_id_start + 1;
+ if (size > 32)
+ size = sizeof(soc_id);
+ strscpy(soc_id, soc_id_start, size);
+
+ soc_dev_attr = devm_kzalloc(sysc->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = devm_kstrdup(sysc->dev, soc_data->family, GFP_KERNEL);
+ if (!soc_dev_attr->family)
+ return -ENOMEM;
+
+ soc_dev_attr->soc_id = devm_kstrdup(sysc->dev, soc_id, GFP_KERNEL);
+ if (!soc_dev_attr->soc_id)
+ return -ENOMEM;
+
+ val = readl(sysc->base + soc_data->devid_offset);
+ revision = field_get(soc_data->revision_mask, val);
+ specific_id = field_get(soc_data->specific_id_mask, val);
+ soc_dev_attr->revision = devm_kasprintf(sysc->dev, GFP_KERNEL, "%u", revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ if (soc_data->id && specific_id != soc_data->id) {
+ dev_warn(sysc->dev, "SoC mismatch (product = 0x%x)\n", specific_id);
+ return -ENODEV;
+ }
+
+ /* Try to call SoC-specific device identification */
+ if (soc_data->print_id) {
+ soc_data->print_id(sysc->dev, sysc->base, soc_dev_attr);
+ } else {
+ dev_info(sysc->dev, "Detected Renesas %s %s Rev %s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision);
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ return 0;
+}
+
+static const struct of_device_id rz_sysc_match[] = {
+#ifdef CONFIG_SYSC_R9A08G045
+ { .compatible = "renesas,r9a08g045-sysc", .data = &rzg3s_sysc_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G047
+ { .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G056
+ { .compatible = "renesas,r9a09g056-sys", .data = &rzv2n_sys_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G057
+ { .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, rz_sysc_match);
+
+static int rz_sysc_probe(struct platform_device *pdev)
+{
+ const struct rz_sysc_init_data *data;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ struct rz_sysc *sysc;
+ int ret;
+
+ struct regmap_config *regmap_cfg __free(kfree) = kzalloc(sizeof(*regmap_cfg), GFP_KERNEL);
+ if (!regmap_cfg)
+ return -ENOMEM;
+
+ match = of_match_node(rz_sysc_match, dev->of_node);
+ if (!match)
+ return -ENODEV;
+
+ data = match->data;
+
+ sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL);
+ if (!sysc)
+ return -ENOMEM;
+
+ sysc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sysc->base))
+ return PTR_ERR(sysc->base);
+
+ sysc->dev = dev;
+ ret = rz_sysc_soc_init(sysc, match);
+ if (ret)
+ return ret;
+
+ regmap_cfg->name = "rz_sysc_regs";
+ regmap_cfg->reg_bits = 32;
+ regmap_cfg->reg_stride = 4;
+ regmap_cfg->val_bits = 32;
+ regmap_cfg->fast_io = true;
+ regmap_cfg->max_register = data->max_register;
+ regmap_cfg->readable_reg = data->readable_reg;
+ regmap_cfg->writeable_reg = data->writeable_reg;
+
+ regmap = devm_regmap_init_mmio(dev, sysc->base, regmap_cfg);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return of_syscon_register_regmap(dev->of_node, regmap);
+}
+
+static struct platform_driver rz_sysc_driver = {
+ .driver = {
+ .name = "renesas-rz-sysc",
+ .suppress_bind_attrs = true,
+ .of_match_table = rz_sysc_match
+ },
+ .probe = rz_sysc_probe
+};
+
+static int __init rz_sysc_init(void)
+{
+ return platform_driver_register(&rz_sysc_driver);
+}
+subsys_initcall(rz_sysc_init);
+
+MODULE_DESCRIPTION("Renesas RZ System Controller Driver");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
new file mode 100644
index 000000000000..88929bf21cb1
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ System Controller
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#ifndef __SOC_RENESAS_RZ_SYSC_H__
+#define __SOC_RENESAS_RZ_SYSC_H__
+
+#include <linux/device.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
+
+/**
+ * struct rz_sysc_soc_id_init_data - RZ SYSC SoC identification initialization data
+ * @family: RZ SoC family
+ * @id: RZ SoC expected ID
+ * @devid_offset: SYSC SoC ID register offset
+ * @revision_mask: SYSC SoC ID revision mask
+ * @specific_id_mask: SYSC SoC ID specific ID mask
+ * @print_id: print SoC-specific extended device identification
+ */
+struct rz_sysc_soc_id_init_data {
+ const char * const family;
+ u32 id;
+ u32 devid_offset;
+ u32 revision_mask;
+ u32 specific_id_mask;
+ void (*print_id)(struct device *dev, void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr);
+};
+
+/**
+ * struct rz_sysc_init_data - RZ SYSC initialization data
+ * @soc_id_init_data: RZ SYSC SoC ID initialization data
+ * @writeable_reg: Regmap writeable register check function
+ * @readable_reg: Regmap readable register check function
+ * @max_register: Maximum SYSC register offset to be used by the regmap config
+ */
+struct rz_sysc_init_data {
+ const struct rz_sysc_soc_id_init_data *soc_id_init_data;
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ u32 max_register;
+};
+
+extern const struct rz_sysc_init_data rzg3e_sys_init_data;
+extern const struct rz_sysc_init_data rzg3s_sysc_init_data;
+extern const struct rz_sysc_init_data rzv2h_sys_init_data;
+extern const struct rz_sysc_init_data rzv2n_sys_init_data;
+
+#endif /* __SOC_RENESAS_RZ_SYSC_H__ */
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
index aff2f7e95237..785f60c6f3ad 100644
--- a/drivers/soc/rockchip/Kconfig
+++ b/drivers/soc/rockchip/Kconfig
@@ -22,18 +22,6 @@ config ROCKCHIP_IODOMAIN
necessary for the io domain setting of the SoC to match the
voltage supplied by the regulators.
-config ROCKCHIP_PM_DOMAINS
- bool "Rockchip generic power domain"
- depends on PM
- select PM_GENERIC_DOMAINS
- help
- Say y here to enable power domain support.
- In order to meet high performance and low power requirements, a power
- management unit is designed or saving power when RK3288 in low power
- mode. The RK3288 PMU is dedicated for managing the power of the whole chip.
-
- If unsure, say N.
-
config ROCKCHIP_DTPM
tristate "Rockchip DTPM hierarchy"
depends on DTPM && m
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 5fd62046b28a..27bfa09ff251 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -6,13 +6,12 @@
*/
#include <linux/err.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
struct rockchip_grf_value {
const char *desc;
@@ -32,7 +31,7 @@ static const struct rockchip_grf_value rk3036_defaults[] __initconst = {
* Disable auto jtag/sdmmc switching that causes issues with the
* clock-framework and the mmc controllers making them unreliable.
*/
- { "jtag switching", RK3036_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 11) },
+ { "jtag switching", RK3036_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(11), 0) },
};
static const struct rockchip_grf_info rk3036_grf __initconst = {
@@ -41,9 +40,11 @@ static const struct rockchip_grf_info rk3036_grf __initconst = {
};
#define RK3128_GRF_SOC_CON0 0x140
+#define RK3128_GRF_SOC_CON1 0x144
static const struct rockchip_grf_value rk3128_defaults[] __initconst = {
- { "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) },
+ { "jtag switching", RK3128_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(8), 0) },
+ { "vpu main clock", RK3128_GRF_SOC_CON1, FIELD_PREP_WM16_CONST(BIT(10), 0) },
};
static const struct rockchip_grf_info rk3128_grf __initconst = {
@@ -54,7 +55,7 @@ static const struct rockchip_grf_info rk3128_grf __initconst = {
#define RK3228_GRF_SOC_CON6 0x418
static const struct rockchip_grf_value rk3228_defaults[] __initconst = {
- { "jtag switching", RK3228_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 8) },
+ { "jtag switching", RK3228_GRF_SOC_CON6, FIELD_PREP_WM16_CONST(BIT(8), 0) },
};
static const struct rockchip_grf_info rk3228_grf __initconst = {
@@ -66,8 +67,8 @@ static const struct rockchip_grf_info rk3228_grf __initconst = {
#define RK3288_GRF_SOC_CON2 0x24c
static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
- { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
- { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
+ { "jtag switching", RK3288_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(12), 0) },
+ { "pwm select", RK3288_GRF_SOC_CON2, FIELD_PREP_WM16_CONST(BIT(0), 1) },
};
static const struct rockchip_grf_info rk3288_grf __initconst = {
@@ -78,7 +79,7 @@ static const struct rockchip_grf_info rk3288_grf __initconst = {
#define RK3328_GRF_SOC_CON4 0x410
static const struct rockchip_grf_value rk3328_defaults[] __initconst = {
- { "jtag switching", RK3328_GRF_SOC_CON4, HIWORD_UPDATE(0, 1, 12) },
+ { "jtag switching", RK3328_GRF_SOC_CON4, FIELD_PREP_WM16_CONST(BIT(12), 0) },
};
static const struct rockchip_grf_info rk3328_grf __initconst = {
@@ -89,7 +90,8 @@ static const struct rockchip_grf_info rk3328_grf __initconst = {
#define RK3368_GRF_SOC_CON15 0x43c
static const struct rockchip_grf_value rk3368_defaults[] __initconst = {
- { "jtag switching", RK3368_GRF_SOC_CON15, HIWORD_UPDATE(0, 1, 13) },
+ { "jtag switching", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(13), 0) },
+ { "pwm select", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(12), 1) },
};
static const struct rockchip_grf_info rk3368_grf __initconst = {
@@ -97,10 +99,21 @@ static const struct rockchip_grf_info rk3368_grf __initconst = {
.num_values = ARRAY_SIZE(rk3368_defaults),
};
+#define RK3368_PMUGRF_SOC_CON0 0x100
+
+static const struct rockchip_grf_value rk3368_pmugrf_defaults[] __initconst = {
+ { "pwm2 select", RK3368_PMUGRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(7), 0) },
+};
+
+static const struct rockchip_grf_info rk3368_pmugrf __initconst = {
+ .values = rk3368_pmugrf_defaults,
+ .num_values = ARRAY_SIZE(rk3368_pmugrf_defaults),
+};
+
#define RK3399_GRF_SOC_CON7 0xe21c
static const struct rockchip_grf_value rk3399_defaults[] __initconst = {
- { "jtag switching", RK3399_GRF_SOC_CON7, HIWORD_UPDATE(0, 1, 12) },
+ { "jtag switching", RK3399_GRF_SOC_CON7, FIELD_PREP_WM16_CONST(BIT(12), 0) },
};
static const struct rockchip_grf_info rk3399_grf __initconst = {
@@ -111,9 +124,9 @@ static const struct rockchip_grf_info rk3399_grf __initconst = {
#define RK3566_GRF_USB3OTG0_CON1 0x0104
static const struct rockchip_grf_value rk3566_defaults[] __initconst = {
- { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(0, 1, 12) },
- { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 7) },
- { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 0) },
+ { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(12), 0) },
+ { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(7), 1) },
+ { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(0), 1) },
};
static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
@@ -121,10 +134,33 @@ static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
.num_values = ARRAY_SIZE(rk3566_defaults),
};
+#define RK3576_SYSGRF_SOC_CON1 0x0004
+
+static const struct rockchip_grf_value rk3576_defaults_sys_grf[] __initconst = {
+ { "i3c0 weakpull", RK3576_SYSGRF_SOC_CON1, FIELD_PREP_WM16_CONST(GENMASK(7, 6), 3) },
+ { "i3c1 weakpull", RK3576_SYSGRF_SOC_CON1, FIELD_PREP_WM16_CONST(GENMASK(9, 8), 3) },
+};
+
+static const struct rockchip_grf_info rk3576_sysgrf __initconst = {
+ .values = rk3576_defaults_sys_grf,
+ .num_values = ARRAY_SIZE(rk3576_defaults_sys_grf),
+};
+
+#define RK3576_IOCGRF_MISC_CON 0x04F0
+
+static const struct rockchip_grf_value rk3576_defaults_ioc_grf[] __initconst = {
+ { "jtag switching", RK3576_IOCGRF_MISC_CON, FIELD_PREP_WM16_CONST(BIT(1), 0) },
+};
+
+static const struct rockchip_grf_info rk3576_iocgrf __initconst = {
+ .values = rk3576_defaults_ioc_grf,
+ .num_values = ARRAY_SIZE(rk3576_defaults_ioc_grf),
+};
+
#define RK3588_GRF_SOC_CON6 0x0318
static const struct rockchip_grf_value rk3588_defaults[] __initconst = {
- { "jtag switching", RK3588_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 14) },
+ { "jtag switching", RK3588_GRF_SOC_CON6, FIELD_PREP_WM16_CONST(BIT(14), 0) },
};
static const struct rockchip_grf_info rk3588_sysgrf __initconst = {
@@ -132,7 +168,6 @@ static const struct rockchip_grf_info rk3588_sysgrf __initconst = {
.num_values = ARRAY_SIZE(rk3588_defaults),
};
-
static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
{
.compatible = "rockchip,rk3036-grf",
@@ -153,12 +188,21 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
.compatible = "rockchip,rk3368-grf",
.data = (void *)&rk3368_grf,
}, {
+ .compatible = "rockchip,rk3368-pmugrf",
+ .data = (void *)&rk3368_pmugrf,
+ }, {
.compatible = "rockchip,rk3399-grf",
.data = (void *)&rk3399_grf,
}, {
.compatible = "rockchip,rk3566-pipe-grf",
.data = (void *)&rk3566_pipegrf,
}, {
+ .compatible = "rockchip,rk3576-sys-grf",
+ .data = (void *)&rk3576_sysgrf,
+ }, {
+ .compatible = "rockchip,rk3576-ioc-grf",
+ .data = (void *)&rk3576_iocgrf,
+ }, {
.compatible = "rockchip,rk3588-sys-grf",
.data = (void *)&rk3588_sysgrf,
},
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
index 6619256c2d11..f94985a905c2 100644
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -39,6 +39,10 @@
#define RK3288_SOC_CON2_FLASH0 BIT(7)
#define RK3288_SOC_FLASH_SUPPLY_NUM 2
+#define RK3308_SOC_CON0 0x300
+#define RK3308_SOC_CON0_VCCIO3 BIT(8)
+#define RK3308_SOC_VCCIO3_SUPPLY_NUM 3
+
#define RK3328_SOC_CON4 0x410
#define RK3328_SOC_CON4_VCCIO2 BIT(7)
#define RK3328_SOC_VCCIO2_SUPPLY_NUM 1
@@ -229,6 +233,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
}
+static void rk3308_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no vccio3 supply we should leave things alone */
+ if (!iod->supplies[RK3308_SOC_VCCIO3_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set vccio3 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3308_SOC_CON0_VCCIO3 | (RK3308_SOC_CON0_VCCIO3 << 16);
+ ret = regmap_write(iod->grf, RK3308_SOC_CON0, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update vccio3 vsel ctrl\n");
+}
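The value written above follows the Rockchip GRF "hiword mask" convention: bits 31:16 of the written word act as a write-enable mask for bits 15:0, so one 32-bit write updates a single field and leaves every other field of SOC_CON0 untouched. A minimal stand-alone sketch of that encoding (the helper name and the printed output are illustrative only, not part of the driver):

/*
 * Hiword-update encoding sketch: mask in the upper half-word, value
 * (already positioned within the mask) in the lower half-word.
 */
#include <stdio.h>

static unsigned int hiword_update(unsigned int mask, unsigned int val)
{
	return (mask << 16) | (val & mask);
}

int main(void)
{
	unsigned int vccio3 = 1u << 8;	/* RK3308_SOC_CON0_VCCIO3 */

	/* same as RK3308_SOC_CON0_VCCIO3 | (RK3308_SOC_CON0_VCCIO3 << 16) */
	printf("0x%08x\n", hiword_update(vccio3, vccio3));	/* 0x01000100 */
	return 0;
}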
+
static void rk3328_iodomain_init(struct rockchip_iodomain *iod)
{
int ret;
@@ -376,6 +399,19 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
.init = rk3288_iodomain_init,
};
+static const struct rockchip_iodomain_soc_data soc_data_rk3308 = {
+ .grf_offset = 0x300,
+ .supply_names = {
+ "vccio0",
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ },
+ .init = rk3308_iodomain_init,
+};
+
static const struct rockchip_iodomain_soc_data soc_data_rk3328 = {
.grf_offset = 0x410,
.supply_names = {
@@ -529,6 +565,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.data = &soc_data_rk3288
},
{
+ .compatible = "rockchip,rk3308-io-voltage-domain",
+ .data = &soc_data_rk3308
+ },
+ {
.compatible = "rockchip,rk3328-io-voltage-domain",
.data = &soc_data_rk3328
},
@@ -687,7 +727,7 @@ unreg_notify:
return ret;
}
-static int rockchip_iodomain_remove(struct platform_device *pdev)
+static void rockchip_iodomain_remove(struct platform_device *pdev)
{
struct rockchip_iodomain *iod = platform_get_drvdata(pdev);
int i;
@@ -699,15 +739,13 @@ static int rockchip_iodomain_remove(struct platform_device *pdev)
regulator_unregister_notifier(io_supply->reg,
&io_supply->nb);
}
-
- return 0;
}
static struct platform_driver rockchip_iodomain_driver = {
- .probe = rockchip_iodomain_probe,
- .remove = rockchip_iodomain_remove,
- .driver = {
- .name = "rockchip-iodomain",
+ .probe = rockchip_iodomain_probe,
+ .remove = rockchip_iodomain_remove,
+ .driver = {
+ .name = "rockchip-iodomain",
.of_match_table = rockchip_iodomain_match,
},
};
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 7a8f291e7704..1a5dfdc978dc 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -42,16 +42,13 @@ config EXYNOS_PMU
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
select MFD_CORE
+ select REGMAP_MMIO
# There is no need to enable these drivers for ARMv8
config EXYNOS_PMU_ARM_DRIVERS
bool "Exynos PMU ARMv7-specific driver extensions" if COMPILE_TEST
depends on EXYNOS_PMU
-config EXYNOS_PM_DOMAINS
- bool "Exynos PM domains" if COMPILE_TEST
- depends on (ARCH_EXYNOS && PM_GENERIC_DOMAINS) || COMPILE_TEST
-
config SAMSUNG_PM_CHECK
bool "S3C2410 PM Suspend Memory CRC"
depends on PM && (ARCH_S3C64XX || ARCH_S5PV210)
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 248a33d7754a..636a762608c9 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -6,7 +6,8 @@ exynos_chipid-y += exynos-chipid.o exynos-asv.o
obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o
-obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
+obj-$(CONFIG_EXYNOS_PMU) += exynos_pmu.o
+exynos_pmu-y += exynos-pmu.o gs101-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index d60af8acc391..8e681f519526 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -9,8 +9,10 @@
* Samsung Exynos SoC Adaptive Supply Voltage support
*/
+#include <linux/array_size.h>
#include <linux/cpu.h>
#include <linux/device.h>
+#include <linux/energy_model.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
@@ -97,9 +99,16 @@ static int exynos_asv_update_opps(struct exynos_asv *asv)
last_opp_table = opp_table;
ret = exynos_asv_update_cpu_opps(asv, cpu);
- if (ret < 0)
+ if (!ret) {
+ /*
+ * Update EM power values since OPP
+ * voltage values may have changed.
+ */
+ em_dev_update_chip_binning(cpu);
+ } else {
dev_err(asv->dev, "Couldn't udate OPPs for cpu%d\n",
cpuid);
+ }
}
dev_pm_opp_put_opp_table(opp_table);
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 7ba45c4aff97..d3b4b5508e0c 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -12,6 +12,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
@@ -55,10 +56,18 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
+ { "EXYNOS7870", 0xE7870000 },
+ { "EXYNOS8890", 0xE8890000 },
/* Compatible with: samsung,exynos850-chipid */
+ { "EXYNOS2200", 0xE9925000 },
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
+ { "EXYNOS8895", 0xE8895000 },
+ { "EXYNOS9610", 0xE9610000 },
+ { "EXYNOS9810", 0xE9810000 },
+ { "EXYNOS990", 0xE9830000 },
{ "EXYNOSAUTOV9", 0xAAA80000 },
+ { "EXYNOSAUTOV920", 0x0A920000 },
};
static const char *product_id_to_soc_id(unsigned int product_id)
@@ -100,16 +109,17 @@ static int exynos_chipid_probe(struct platform_device *pdev)
const struct exynos_chipid_variant *drv_data;
struct exynos_chipid_info soc_info;
struct soc_device_attribute *soc_dev_attr;
+ struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct device_node *root;
struct regmap *regmap;
int ret;
- drv_data = of_device_get_match_data(&pdev->dev);
+ drv_data = of_device_get_match_data(dev);
if (!drv_data)
return -EINVAL;
- regmap = device_node_to_regmap(pdev->dev.of_node);
+ regmap = device_node_to_regmap(dev->of_node);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -117,8 +127,7 @@ static int exynos_chipid_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
- GFP_KERNEL);
+ soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -128,8 +137,10 @@ static int exynos_chipid_probe(struct platform_device *pdev)
of_property_read_string(root, "model", &soc_dev_attr->machine);
of_node_put(root);
- soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%x", soc_info.revision);
+ soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%x",
+ soc_info.revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");
@@ -141,13 +152,13 @@ static int exynos_chipid_probe(struct platform_device *pdev)
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
- ret = exynos_asv_init(&pdev->dev, regmap);
+ ret = exynos_asv_init(dev, regmap);
if (ret)
goto err;
platform_set_drvdata(pdev, soc_dev);
- dev_info(&pdev->dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ dev_info(dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
soc_dev_attr->soc_id, soc_info.product_id, soc_info.revision);
return 0;
@@ -158,13 +169,11 @@ err:
return ret;
}
-static int exynos_chipid_remove(struct platform_device *pdev)
+static void exynos_chipid_remove(struct platform_device *pdev)
{
struct soc_device *soc_dev = platform_get_drvdata(pdev);
soc_device_unregister(soc_dev);
-
- return 0;
}
static const struct exynos_chipid_variant exynos4210_chipid_drv_data = {
@@ -196,8 +205,8 @@ static struct platform_driver exynos_chipid_driver = {
.name = "exynos-chipid",
.of_match_table = exynos_chipid_of_device_ids,
},
- .probe = exynos_chipid_probe,
- .remove = exynos_chipid_remove,
+ .probe = exynos_chipid_probe,
+ .remove = exynos_chipid_remove,
};
module_platform_driver(exynos_chipid_driver);
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 250537d7cfd6..d58376c38179 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,6 +5,10 @@
//
// Exynos - CPU PMU(Power Management Unit) support
+#include <linux/array_size.h>
+#include <linux/bitmap.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -12,6 +16,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
@@ -21,10 +27,23 @@
struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
+ struct regmap *pmureg;
+ struct regmap *pmuintrgen;
+ /*
+ * Serialization lock for CPU hot plug and cpuidle ACPM hint
+ * programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot
+ * flags.
+ */
+ raw_spinlock_t cpupm_lock;
+ unsigned long *in_cpuhp;
+ bool sys_insuspend;
+ bool sys_inreboot;
};
void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
+/* forward declaration */
+static struct platform_driver exynos_pmu_driver;
void pmu_raw_writel(u32 val, u32 offset)
{
@@ -75,11 +94,36 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode)
#define exynos_pmu_data_arm_ptr(data) NULL
#endif
+static const struct regmap_config regmap_smccfg = {
+ .name = "pmu_regs",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .use_single_read = true,
+ .use_single_write = true,
+ .reg_read = tensor_sec_reg_read,
+ .reg_write = tensor_sec_reg_write,
+ .reg_update_bits = tensor_sec_update_bits,
+ .use_raw_spinlock = true,
+};
+
+static const struct regmap_config regmap_pmu_intr = {
+ .name = "pmu_intr_gen",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .use_raw_spinlock = true,
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
static const struct of_device_id exynos_pmu_of_device_ids[] = {
{
+ .compatible = "google,gs101-pmu",
+ .data = &gs101_pmu_data,
+ }, {
.compatible = "samsung,exynos3250-pmu",
.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
}, {
@@ -113,19 +157,328 @@ static const struct mfd_cell exynos_pmu_devs[] = {
{ .name = "exynos-clkout", },
};
+/**
+ * exynos_get_pmu_regmap() - Obtain pmureg regmap
+ *
+ * Find the pmureg regmap previously configured in probe() and return regmap
+ * pointer.
+ *
+ * Return: A pointer to regmap if found or ERR_PTR error value.
+ */
struct regmap *exynos_get_pmu_regmap(void)
{
struct device_node *np = of_find_matching_node(NULL,
exynos_pmu_of_device_ids);
if (np)
- return syscon_node_to_regmap(np);
+ return exynos_get_pmu_regmap_by_phandle(np, NULL);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
+/**
+ * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
+ * @np: Device node holding PMU phandle property
+ * @propname: Name of property holding phandle value
+ *
+ * Find the pmureg regmap previously configured in probe() and return regmap
+ * pointer.
+ *
+ * Return: A pointer to regmap if found or ERR_PTR error value.
+ */
+struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
+ const char *propname)
+{
+ struct device_node *pmu_np;
+ struct device *dev;
+
+ if (propname)
+ pmu_np = of_parse_phandle(np, propname, 0);
+ else
+ pmu_np = np;
+
+ if (!pmu_np)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Determine if exynos-pmu device has probed and therefore regmap
+ * has been created and can be returned to the caller. Otherwise we
+ * return -EPROBE_DEFER.
+ */
+ dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
+ (void *)pmu_np);
+
+ if (propname)
+ of_node_put(pmu_np);
+
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ put_device(dev);
+
+ return syscon_node_to_regmap(pmu_np);
+}
+EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
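A usage sketch for the new accessor from a client driver's point of view; the property name "samsung,pmu-syscon" and the probe shape are assumptions for illustration, not something defined by this patch:

/* Hypothetical consumer: resolve the PMU regmap early in probe and let
 * -EPROBE_DEFER (returned until exynos-pmu has probed) mean "retry later". */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-pmu.h>

static int foo_probe(struct platform_device *pdev)
{
	struct regmap *pmureg;

	pmureg = exynos_get_pmu_regmap_by_phandle(pdev->dev.of_node,
						  "samsung,pmu-syscon");
	if (IS_ERR(pmureg))
		return dev_err_probe(&pdev->dev, PTR_ERR(pmureg),
				     "PMU regmap not available\n");

	/* ... regmap_read()/regmap_update_bits() on pmureg ... */
	return 0;
}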
+
+/*
+ * CPU_INFORM register "hint" values are required to be programmed in addition to
+ * the standard PSCI calls to have functional CPU hotplug and CPU idle states.
+ * This is required to work around limitations in the el3mon/ACPM firmware.
+ */
+#define CPU_INFORM_CLEAR 0
+#define CPU_INFORM_C2 1
+
+/*
+ * __gs101_cpu_pmu_ prefix functions are common code shared by CPU PM notifiers
+ * (CPUIdle) and CPU hotplug callbacks. Functions should be called with IRQs
+ * disabled and cpupm_lock held.
+ */
+static int __gs101_cpu_pmu_online(unsigned int cpu)
+ __must_hold(&pmu_context->cpupm_lock)
+{
+ unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
+
+ /* clear cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_CLEAR);
+
+ mask = BIT(cpu);
+
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, (0 << cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);
+
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
+ reg & mask);
+
+ return 0;
+}
+
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_online(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+
+ if (pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ cpu = smp_processor_id();
+ __gs101_cpu_pmu_online(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+
+ __gs101_cpu_pmu_online(cpu);
+ /*
+ * Mark this CPU as having finished hotplug.
+ * This means the CPU can now enter the C2 idle state.
+ */
+ clear_bit(cpu, pmu_context->in_cpuhp);
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+/* Common function shared by both CPU hot plug and CPUIdle */
+static int __gs101_cpu_pmu_offline(unsigned int cpu)
+ __must_hold(&pmu_context->cpupm_lock)
+{
+ unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
+
+ /* set cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_C2);
+
+ mask = BIT(cpu);
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, BIT(cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+
+ mask = (BIT(cpu + 8));
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+
+ return 0;
+}
+
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_offline(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ cpu = smp_processor_id();
+
+ if (test_bit(cpu, pmu_context->in_cpuhp)) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_BAD;
+ }
+
+ /* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */
+ if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ __gs101_cpu_pmu_offline(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ /*
+ * Mark this CPU as entering hotplug. To avoid confusing ACPM,
+ * a CPU entering hotplug must not enter the C2 idle state.
+ */
+ set_bit(cpu, pmu_context->in_cpuhp);
+ __gs101_cpu_pmu_offline(cpu);
+
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
+ unsigned long action, void *v)
+{
+ switch (action) {
+ case CPU_PM_ENTER:
+ return gs101_cpu_pmu_offline();
+
+ case CPU_PM_EXIT:
+ return gs101_cpu_pmu_online();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gs101_cpu_pm_notifier = {
+ .notifier_call = gs101_cpu_pm_notify_callback,
+ /*
+ * We want to be called first, as the ACPM hint and handshake is what
+ * puts the CPU into C2.
+ */
+ .priority = INT_MAX
+};
+
+static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
+ unsigned long event, void *v)
+{
+ unsigned long flags;
+
+ switch (event) {
+ case SYS_POWER_OFF:
+ case SYS_RESTART:
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ pmu_context->sys_inreboot = true;
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpupm_reboot_nb = {
+ .priority = INT_MAX,
+ .notifier_call = exynos_cpupm_reboot_notifier,
+};
+
+static int setup_cpuhp_and_cpuidle(struct device *dev)
+{
+ struct device_node *intr_gen_node;
+ struct resource intrgen_res;
+ void __iomem *virt_addr;
+ int ret, cpu;
+
+ intr_gen_node = of_parse_phandle(dev->of_node,
+ "google,pmu-intr-gen-syscon", 0);
+ if (!intr_gen_node) {
+ /*
+ * To maintain support for older DTs that didn't specify the syscon
+ * phandle, just issue a warning rather than failing to probe.
+ */
+ dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
+ return 0;
+ }
+
+ /*
+ * To avoid lockdep issues (CPU PM notifiers use raw spinlocks) create
+ * a mmio regmap for pmu-intr-gen that uses raw spinlocks instead of
+ * syscon provided regmap.
+ */
+ ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
+ of_node_put(intr_gen_node);
+
+ virt_addr = devm_ioremap(dev, intrgen_res.start,
+ resource_size(&intrgen_res));
+ if (!virt_addr)
+ return -ENOMEM;
+
+ pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
+ &regmap_pmu_intr);
+ if (IS_ERR(pmu_context->pmuintrgen)) {
+ dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
+ return PTR_ERR(pmu_context->pmuintrgen);
+ }
+
+ /* register custom mmio regmap with syscon */
+ ret = of_syscon_register_regmap(intr_gen_node,
+ pmu_context->pmuintrgen);
+ if (ret)
+ return ret;
+
+ pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
+ GFP_KERNEL);
+ if (!pmu_context->in_cpuhp)
+ return -ENOMEM;
+
+ /* set PMU to power on */
+ for_each_online_cpu(cpu)
+ gs101_cpuhp_pmu_online(cpu);
+
+ /* register CPU hotplug callbacks */
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
+ gs101_cpuhp_pmu_online, NULL);
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
+ NULL, gs101_cpuhp_pmu_offline);
+
+ /* register CPU PM notifiers for cpuidle */
+ cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
+ register_reboot_notifier(&exynos_cpupm_reboot_nb);
+ return 0;
+}
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct regmap_config pmu_regmcfg;
+ struct regmap *regmap;
+ struct resource *res;
int ret;
pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
@@ -137,9 +490,53 @@ static int exynos_pmu_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!pmu_context)
return -ENOMEM;
- pmu_context->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
pmu_context->pmu_data = of_device_get_match_data(dev);
+ /* For SoCs that secure PMU register writes, use a custom regmap */
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
+ pmu_regmcfg = regmap_smccfg;
+ pmu_regmcfg.max_register = resource_size(res) -
+ pmu_regmcfg.reg_stride;
+ pmu_regmcfg.wr_table = pmu_context->pmu_data->wr_table;
+ pmu_regmcfg.rd_table = pmu_context->pmu_data->rd_table;
+
+ /* Need physical address for SMC call */
+ regmap = devm_regmap_init(dev, NULL,
+ (void *)(uintptr_t)res->start,
+ &pmu_regmcfg);
+
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "regmap init failed\n");
+
+ ret = of_syscon_register_regmap(dev->of_node, regmap);
+ if (ret)
+ return ret;
+ } else {
+ /* let syscon create mmio regmap */
+ regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "syscon_node_to_regmap failed\n");
+ }
+
+ pmu_context->pmureg = regmap;
+ pmu_context->dev = dev;
+ raw_spin_lock_init(&pmu_context->cpupm_lock);
+ pmu_context->sys_inreboot = false;
+ pmu_context->sys_insuspend = false;
+
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
+ ret = setup_cpuhp_and_cpuidle(dev);
+ if (ret)
+ return ret;
+ }
+
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
@@ -157,10 +554,32 @@ static int exynos_pmu_probe(struct platform_device *pdev)
return 0;
}
+static int exynos_cpupm_suspend_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = true;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static int exynos_cpupm_resume_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = false;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static const struct dev_pm_ops cpupm_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
+ exynos_cpupm_resume_noirq)
+};
+
static struct platform_driver exynos_pmu_driver = {
.driver = {
.name = "exynos-pmu",
.of_match_table = exynos_pmu_of_device_ids,
+ .pm = pm_sleep_ptr(&cpupm_pm_ops),
},
.probe = exynos_pmu_probe,
};
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 1c652ffd79b4..fbe381e2a2e1 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -13,18 +13,50 @@
#define PMU_TABLE_END (-1U)
+struct regmap_access_table;
+
struct exynos_pmu_conf {
unsigned int offset;
u8 val[NUM_SYS_POWERDOWN];
};
+/**
+ * struct exynos_pmu_data - of_device_id (match) data
+ *
+ * @pmu_config: Optional table detailing register writes for target system
+ * states: SYS_AFTR, SYS_LPA, SYS_SLEEP.
+ * @pmu_config_extra: Optional secondary table detailing additional register
+ * writes for target system states: SYS_AFTR, SYS_LPA,
+ * SYS_SLEEP.
+ * @pmu_secure: Whether or not PMU register writes need to be done via SMC call.
+ * @pmu_cpuhp: Whether or not extra handling is required for CPU hotplug and
+ * CPUidle outside of standard PSCI calls, due to non-compliant
+ * firmware.
+ * @pmu_init: Optional init function.
+ * @powerdown_conf: Optional callback before entering target system states:
+ * SYS_AFTR, SYS_LPA, SYS_SLEEP. This will be invoked before
+ * the registers from @pmu_config are written.
+ * @powerdown_conf_extra: Optional secondary callback before entering
+ * target system states: SYS_AFTR, SYS_LPA, SYS_SLEEP.
+ * This will be invoked after @pmu_config registers have
+ * been written.
+ * @rd_table: A table of readable register ranges in case a custom regmap is
+ * used (i.e. when @pmu_secure is @true).
+ * @wr_table: A table of writable register ranges in case a custom regmap is
+ * used (i.e. when @pmu_secure is @true).
+ */
struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
const struct exynos_pmu_conf *pmu_config_extra;
+ bool pmu_secure;
+ bool pmu_cpuhp;
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
void (*powerdown_conf_extra)(enum sys_powerdown);
+
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *wr_table;
};
extern void __iomem *pmu_base_addr;
@@ -38,7 +70,14 @@ extern const struct exynos_pmu_data exynos4412_pmu_data;
extern const struct exynos_pmu_data exynos5250_pmu_data;
extern const struct exynos_pmu_data exynos5420_pmu_data;
#endif
+extern const struct exynos_pmu_data gs101_pmu_data;
extern void pmu_raw_writel(u32 val, u32 offset);
extern u32 pmu_raw_readl(u32 offset);
+
+int tensor_sec_reg_write(void *context, unsigned int reg, unsigned int val);
+int tensor_sec_reg_read(void *context, unsigned int reg, unsigned int *val);
+int tensor_sec_update_bits(void *context, unsigned int reg, unsigned int mask,
+ unsigned int val);
+
#endif /* __EXYNOS_PMU_H */
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
index 114352695ac2..5f7bdf3bab05 100644
--- a/drivers/soc/samsung/exynos-usi.c
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -6,6 +6,7 @@
* Samsung Exynos USI driver (Universal Serial Interface).
*/
+#include <linux/array_size.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -16,6 +17,18 @@
#include <dt-bindings/soc/samsung,exynos-usi.h>
+/* USIv1: System Register: SW_CONF register bits */
+#define USI_V1_SW_CONF_NONE 0x0
+#define USI_V1_SW_CONF_I2C0 0x1
+#define USI_V1_SW_CONF_I2C1 0x2
+#define USI_V1_SW_CONF_I2C0_1 0x3
+#define USI_V1_SW_CONF_SPI 0x4
+#define USI_V1_SW_CONF_UART 0x8
+#define USI_V1_SW_CONF_UART_I2C1 0xa
+#define USI_V1_SW_CONF_MASK (USI_V1_SW_CONF_I2C0 | USI_V1_SW_CONF_I2C1 | \
+ USI_V1_SW_CONF_I2C0_1 | USI_V1_SW_CONF_SPI | \
+ USI_V1_SW_CONF_UART | USI_V1_SW_CONF_UART_I2C1)
+
/* USIv2: System Register: SW_CONF register bits */
#define USI_V2_SW_CONF_NONE 0x0
#define USI_V2_SW_CONF_UART BIT(0)
@@ -34,7 +47,8 @@
#define USI_OPTION_CLKSTOP_ON BIT(2)
enum exynos_usi_ver {
- USI_VER2 = 2,
+ USI_VER1 = 0,
+ USI_VER2,
};
struct exynos_usi_variant {
@@ -66,19 +80,39 @@ struct exynos_usi_mode {
unsigned int val; /* mode register value */
};
-static const struct exynos_usi_mode exynos_usi_modes[] = {
- [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
- [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
- [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
- [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+#define USI_MODES_MAX (USI_MODE_UART_I2C1 + 1)
+static const struct exynos_usi_mode exynos_usi_modes[][USI_MODES_MAX] = {
+ [USI_VER1] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V1_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V1_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V1_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V1_SW_CONF_I2C0 },
+ [USI_MODE_I2C1] = { .name = "i2c1", .val = USI_V1_SW_CONF_I2C1 },
+ [USI_MODE_I2C0_1] = { .name = "i2c0_1", .val = USI_V1_SW_CONF_I2C0_1 },
+ [USI_MODE_UART_I2C1] = { .name = "uart_i2c1", .val = USI_V1_SW_CONF_UART_I2C1 },
+ }, [USI_VER2] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+ },
};
static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
static const struct exynos_usi_variant exynos850_usi_data = {
.ver = USI_VER2,
.sw_conf_mask = USI_V2_SW_CONF_MASK,
- .min_mode = USI_V2_NONE,
- .max_mode = USI_V2_I2C,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_I2C,
+ .num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
+ .clk_names = exynos850_usi_clk_names,
+};
+
+static const struct exynos_usi_variant exynos8895_usi_data = {
+ .ver = USI_VER1,
+ .sw_conf_mask = USI_V1_SW_CONF_MASK,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_UART_I2C1,
.num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
.clk_names = exynos850_usi_clk_names,
};
@@ -87,6 +121,9 @@ static const struct of_device_id exynos_usi_dt_match[] = {
{
.compatible = "samsung,exynos850-usi",
.data = &exynos850_usi_data,
+ }, {
+ .compatible = "samsung,exynos8895-usi",
+ .data = &exynos8895_usi_data,
},
{ } /* sentinel */
};
@@ -109,14 +146,15 @@ static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
if (mode < usi->data->min_mode || mode > usi->data->max_mode)
return -EINVAL;
- val = exynos_usi_modes[mode].val;
+ val = exynos_usi_modes[usi->data->ver][mode].val;
ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
usi->data->sw_conf_mask, val);
if (ret)
return ret;
usi->mode = mode;
- dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
+ dev_dbg(usi->dev, "protocol: %s\n",
+ exynos_usi_modes[usi->data->ver][usi->mode].name);
return 0;
}
@@ -168,10 +206,42 @@ static int exynos_usi_configure(struct exynos_usi *usi)
if (ret)
return ret;
- if (usi->data->ver == USI_VER2)
- return exynos_usi_enable(usi);
+ if (usi->data->ver == USI_VER1)
+ ret = clk_bulk_prepare_enable(usi->data->num_clks,
+ usi->clks);
+ else if (usi->data->ver == USI_VER2)
+ ret = exynos_usi_enable(usi);
- return 0;
+ return ret;
+}
+
+static void exynos_usi_unconfigure(void *data)
+{
+ struct exynos_usi *usi = data;
+ u32 val;
+ int ret;
+
+ if (usi->data->ver == USI_VER1) {
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
+ return;
+ }
+
+ ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
+ if (ret)
+ return;
+
+ /* Make sure that we've stopped providing the clock to USI IP */
+ val = readl(usi->regs + USI_OPTION);
+ val &= ~USI_OPTION_CLKREQ_ON;
+ val |= USI_OPTION_CLKSTOP_ON;
+ writel(val, usi->regs + USI_OPTION);
+
+ /* Set USI block state to reset */
+ val = readl(usi->regs + USI_CON);
+ val |= USI_CON_RESET;
+ writel(val, usi->regs + USI_CON);
+
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
}
static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
@@ -186,15 +256,11 @@ static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
return -EINVAL;
usi->mode = mode;
- usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ usi->sysreg = syscon_regmap_lookup_by_phandle_args(np, "samsung,sysreg",
+ 1, &usi->sw_conf);
if (IS_ERR(usi->sysreg))
return PTR_ERR(usi->sysreg);
- ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
- &usi->sw_conf);
- if (ret)
- return ret;
-
usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
return 0;
@@ -255,6 +321,10 @@ static int exynos_usi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, exynos_usi_unconfigure, usi);
+ if (ret)
+ return ret;
+
/* Make it possible to embed protocol nodes into USI np */
return of_platform_populate(np, NULL, NULL, dev);
}
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 30f230ed1769..4bad12a99542 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos3250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 7a2d50be6b4a..2ae5c3e1b07a 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index 6fedcd78cb45..58a2209795f7 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5420 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/gs101-pmu.c b/drivers/soc/samsung/gs101-pmu.c
new file mode 100644
index 000000000000..17dadc1b9c6e
--- /dev/null
+++ b/drivers/soc/samsung/gs101-pmu.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Linaro Ltd.
+ *
+ * GS101 PMU (Power Management Unit) support
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/array_size.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/regmap.h>
+
+#include "exynos-pmu.h"
+
+#define PMUALIVE_MASK GENMASK(13, 0)
+#define TENSOR_SET_BITS (BIT(15) | BIT(14))
+#define TENSOR_CLR_BITS BIT(15)
+#define TENSOR_SMC_PMU_SEC_REG 0x82000504
+#define TENSOR_PMUREG_READ 0
+#define TENSOR_PMUREG_WRITE 1
+#define TENSOR_PMUREG_RMW 2
+
+static const struct regmap_range gs101_pmu_registers[] = {
+ regmap_reg_range(GS101_OM_STAT, GS101_SYSTEM_INFO),
+ regmap_reg_range(GS101_IDLE_IP(0), GS101_IDLE_IP_MASK(3)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(0),
+ GS101_PPMPURAM_INFORM_SCL_CH(3)),
+ regmap_reg_range(GS101_INFORM0, GS101_SYSIP_DAT(0)),
+ /* skip SYSIP_DAT1 SYSIP_DAT2 */
+ regmap_reg_range(GS101_SYSIP_DAT(3), GS101_PWR_HOLD_SW_TRIP),
+ regmap_reg_range(GS101_GSA_INFORM(0), GS101_GSA_INFORM(1)),
+ regmap_reg_range(GS101_INFORM4, GS101_IROM_INFORM),
+ regmap_reg_range(GS101_IROM_CPU_INFORM(0), GS101_IROM_CPU_INFORM(7)),
+ regmap_reg_range(GS101_PMU_SPARE(0), GS101_PMU_SPARE(3)),
+ /* skip most IROM_xxx registers */
+ regmap_reg_range(GS101_DREX_CALIBRATION(0), GS101_DREX_CALIBRATION(7)),
+
+#define CLUSTER_CPU_RANGE(cl, cpu) \
+ regmap_reg_range(GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu), \
+ GS101_CLUSTER_CPU_OPTION(cl, cpu)), \
+ regmap_reg_range(GS101_CLUSTER_CPU_OUT(cl, cpu), \
+ GS101_CLUSTER_CPU_IN(cl, cpu)), \
+ regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \
+ GS101_CLUSTER_CPU_INT_DIR(cl, cpu))
+
+ /* cluster 0..2 and cpu 0..4 or 0..1 */
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 2),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 3),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 1),
+#undef CLUSTER_CPU_RANGE
+
+#define CLUSTER_NONCPU_RANGE(cl) \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_CONFIGURATION(cl), \
+ GS101_CLUSTER_NONCPU_OPTION(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_OUT(cl), \
+ GS101_CLUSTER_NONCPU_IN(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_INT_IN(cl), \
+ GS101_CLUSTER_NONCPU_INT_DIR(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl), \
+ GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl), \
+ GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl))
+
+ CLUSTER_NONCPU_RANGE(0),
+ regmap_reg_range(GS101_CLUSTER0_NONCPU_DSU_PCH,
+ GS101_CLUSTER0_NONCPU_DSU_PCH),
+ CLUSTER_NONCPU_RANGE(1),
+ CLUSTER_NONCPU_RANGE(2),
+#undef CLUSTER_NONCPU_RANGE
+
+#define SUBBLK_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_CONFIGURATION(blk), \
+ GS101_SUBBLK_CTRL(blk)), \
+ regmap_reg_range(GS101_SUBBLK_OUT(blk), GS101_SUBBLK_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_INT_IN(blk), \
+ GS101_SUBBLK_INT_DIR(blk)), \
+ regmap_reg_range(GS101_SUBBLK_MEMORY_OUT(blk), \
+ GS101_SUBBLK_MEMORY_IN(blk))
+
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ALIVE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_AOC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_APM),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CMU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CORE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EH),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DISP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G2D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MFC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CSIS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PDP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DNS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3AA),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_IPP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ITP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MCSC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_GDC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TNR),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BO),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF3),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MISC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_S2D),
+#undef SUBBLK_RANGE
+
+#define SUBBLK_CPU_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_CPU_CONFIGURATION(blk), \
+ GS101_SUBBLK_CPU_OPTION(blk)), \
+ regmap_reg_range(GS101_SUBBLK_CPU_OUT(blk), \
+ GS101_SUBBLK_CPU_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_CPU_INT_IN(blk), \
+ GS101_SUBBLK_CPU_INT_DIR(blk))
+
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_APM),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_DBGCORE),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_SSS),
+#undef SUBBLK_CPU_RANGE
+
+ regmap_reg_range(GS101_MIF_CONFIGURATION, GS101_MIF_CTRL),
+ regmap_reg_range(GS101_MIF_OUT, GS101_MIF_IN),
+ regmap_reg_range(GS101_MIF_INT_IN, GS101_MIF_INT_DIR),
+ regmap_reg_range(GS101_TOP_CONFIGURATION, GS101_TOP_OPTION),
+ regmap_reg_range(GS101_TOP_OUT, GS101_TOP_IN),
+ regmap_reg_range(GS101_TOP_INT_IN, GS101_WAKEUP2_STAT),
+ regmap_reg_range(GS101_WAKEUP2_INT_IN, GS101_WAKEUP2_INT_DIR),
+ regmap_reg_range(GS101_SYSTEM_CONFIGURATION, GS101_USER_DEFINED_OUT),
+ regmap_reg_range(GS101_SYSTEM_OUT, GS101_SYSTEM_IN),
+ regmap_reg_range(GS101_SYSTEM_INT_IN, GS101_EINT_WAKEUP_MASK3),
+ regmap_reg_range(GS101_USER_DEFINED_INT_IN, GS101_SCAN2DRAM_INT_DIR),
+ /* skip HCU_START */
+ regmap_reg_range(GS101_CUSTOM_OUT, GS101_CUSTOM_IN),
+ regmap_reg_range(GS101_CUSTOM_INT_IN, GS101_CUSTOM_INT_DIR),
+ regmap_reg_range(GS101_ACK_LAST_CPU, GS101_HCU_R(3)),
+ regmap_reg_range(GS101_HCU_SP, GS101_HCU_PC),
+ /* skip PMU_RAM_CTRL */
+ regmap_reg_range(GS101_APM_HCU_CTRL, GS101_APM_HCU_CTRL),
+ regmap_reg_range(GS101_APM_NMI_ENABLE, GS101_RST_STAT_PMU),
+ regmap_reg_range(GS101_HPM_INT_IN, GS101_BOOT_STAT),
+ regmap_reg_range(GS101_PMLINK_OUT, GS101_PMLINK_AOC_CTRL),
+ regmap_reg_range(GS101_TCXO_BUF_CTRL, GS101_ADD_CTRL),
+ regmap_reg_range(GS101_HCU_TIMEOUT_RESET, GS101_HCU_TIMEOUT_SCAN2DRAM),
+ regmap_reg_range(GS101_TIMER(0), GS101_TIMER(3)),
+ regmap_reg_range(GS101_PPC_MIF(0), GS101_PPC_EH),
+ /* PPC_OFFSET, skip PPC_CPUCL1_0 PPC_CPUCL1_1 */
+ regmap_reg_range(GS101_EXT_REGULATOR_MIF_DURATION, GS101_TCXO_DURATION),
+ regmap_reg_range(GS101_BURNIN_CTRL, GS101_TMU_SUB_TRIP),
+ regmap_reg_range(GS101_MEMORY_CEN, GS101_MEMORY_SMX_FEEDBACK),
+ regmap_reg_range(GS101_SLC_PCH_CHANNEL, GS101_SLC_PCH_CB),
+ regmap_reg_range(GS101_FORCE_NOMC, GS101_FORCE_NOMC),
+ regmap_reg_range(GS101_FORCE_BOOST, GS101_PMLINK_SLC_BUSY),
+ regmap_reg_range(GS101_BOOTSYNC_OUT, GS101_CTRL_SECJTAG_ALIVE),
+ regmap_reg_range(GS101_CTRL_DIV_PLL_ALV_DIVLOW, GS101_CTRL_CLKDIV__CLKRTC),
+ regmap_reg_range(GS101_CTRL_SOC32K, GS101_CTRL_SBU_SW_EN),
+ regmap_reg_range(GS101_PAD_CTRL_CLKOUT0, GS101_PAD_CTRL_WRESETO_n),
+ regmap_reg_range(GS101_PHY_CTRL_USB20, GS101_PHY_CTRL_UFS),
+};
+
+static const struct regmap_range gs101_pmu_ro_registers[] = {
+ regmap_reg_range(GS101_OM_STAT, GS101_VERSION),
+ regmap_reg_range(GS101_OTP_STATUS, GS101_OTP_STATUS),
+
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(0),
+ GS101_PPMPURAM_STATE_SLC_CH(0)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(1),
+ GS101_PPMPURAM_STATE_SLC_CH(1)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(2),
+ GS101_PPMPURAM_STATE_SLC_CH(2)),
+ regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(3),
+ GS101_PPMPURAM_STATE_SLC_CH(3)),
+
+#define CLUSTER_CPU_RANGE(cl, cpu) \
+ regmap_reg_range(GS101_CLUSTER_CPU_IN(cl, cpu), \
+ GS101_CLUSTER_CPU_IN(cl, cpu)), \
+ regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \
+ GS101_CLUSTER_CPU_INT_IN(cl, cpu))
+
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 2),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 3),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 1),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 0),
+ CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 1),
+#undef CLUSTER_CPU_RANGE
+
+#define CLUSTER_NONCPU_RANGE(cl) \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_IN(cl), \
+ GS101_CLUSTER_NONCPU_IN(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_INT_IN(cl), \
+ GS101_CLUSTER_NONCPU_INT_IN(cl)), \
+ regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl), \
+ GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl))
+
+ CLUSTER_NONCPU_RANGE(0),
+ CLUSTER_NONCPU_RANGE(1),
+ CLUSTER_NONCPU_RANGE(2),
+ regmap_reg_range(GS101_CLUSTER_NONCPU_INT_EN(2),
+ GS101_CLUSTER_NONCPU_INT_DIR(2)),
+#undef CLUSTER_NONCPU_RANGE
+
+#define SUBBLK_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_IN(blk), GS101_SUBBLK_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_INT_IN(blk), \
+ GS101_SUBBLK_INT_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_MEMORY_IN(blk), \
+ GS101_SUBBLK_MEMORY_IN(blk))
+
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ALIVE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_AOC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_APM),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CMU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CORE),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EH),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_G3D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DISP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G2D),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MFC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CSIS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PDP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DNS),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3AA),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_IPP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ITP),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MCSC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_GDC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TNR),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BO),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TPU),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF2),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF3),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MISC),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC0),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC1),
+ SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_S2D),
+#undef SUBBLK_RANGE
+
+#define SUBBLK_CPU_RANGE(blk) \
+ regmap_reg_range(GS101_SUBBLK_CPU_IN(blk), \
+ GS101_SUBBLK_CPU_IN(blk)), \
+ regmap_reg_range(GS101_SUBBLK_CPU_INT_IN(blk), \
+ GS101_SUBBLK_CPU_INT_IN(blk))
+
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_APM),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_DBGCORE),
+ SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_SSS),
+#undef SUBBLK_CPU_RANGE
+
+ regmap_reg_range(GS101_MIF_CONFIGURATION, GS101_MIF_CONFIGURATION),
+ regmap_reg_range(GS101_MIF_IN, GS101_MIF_IN),
+ regmap_reg_range(GS101_MIF_INT_IN, GS101_MIF_INT_IN),
+ regmap_reg_range(GS101_TOP_IN, GS101_TOP_IN),
+ regmap_reg_range(GS101_TOP_INT_IN, GS101_TOP_INT_IN),
+ regmap_reg_range(GS101_WAKEUP2_INT_IN, GS101_WAKEUP2_INT_IN),
+ regmap_reg_range(GS101_SYSTEM_IN, GS101_SYSTEM_IN),
+ regmap_reg_range(GS101_SYSTEM_INT_IN, GS101_SYSTEM_INT_IN),
+ regmap_reg_range(GS101_EINT_INT_IN, GS101_EINT_INT_IN),
+ regmap_reg_range(GS101_EINT2_INT_IN, GS101_EINT2_INT_IN),
+ regmap_reg_range(GS101_EINT3_INT_IN, GS101_EINT3_INT_IN),
+ regmap_reg_range(GS101_USER_DEFINED_INT_IN, GS101_USER_DEFINED_INT_IN),
+ regmap_reg_range(GS101_SCAN2DRAM_INT_IN, GS101_SCAN2DRAM_INT_IN),
+ regmap_reg_range(GS101_CUSTOM_IN, GS101_CUSTOM_IN),
+ regmap_reg_range(GS101_CUSTOM_INT_IN, GS101_CUSTOM_INT_IN),
+ regmap_reg_range(GS101_HCU_R(0), GS101_HCU_R(3)),
+ regmap_reg_range(GS101_HCU_SP, GS101_HCU_PC),
+ regmap_reg_range(GS101_NMI_SRC_IN, GS101_NMI_SRC_IN),
+ regmap_reg_range(GS101_HPM_INT_IN, GS101_HPM_INT_IN),
+ regmap_reg_range(GS101_MEMORY_PGEN_FEEDBACK, GS101_MEMORY_PGEN_FEEDBACK),
+ regmap_reg_range(GS101_MEMORY_SMX_FEEDBACK, GS101_MEMORY_SMX_FEEDBACK),
+ regmap_reg_range(GS101_PMLINK_SLC_ACK, GS101_PMLINK_SLC_BUSY),
+ regmap_reg_range(GS101_BOOTSYNC_IN, GS101_BOOTSYNC_IN),
+ regmap_reg_range(GS101_SCAN_READY_IN, GS101_SCAN_READY_IN),
+ regmap_reg_range(GS101_CTRL_PLL_ALV_LOCK, GS101_CTRL_PLL_ALV_LOCK),
+};
+
+static const struct regmap_access_table gs101_pmu_rd_table = {
+ .yes_ranges = gs101_pmu_registers,
+ .n_yes_ranges = ARRAY_SIZE(gs101_pmu_registers),
+};
+
+static const struct regmap_access_table gs101_pmu_wr_table = {
+ .yes_ranges = gs101_pmu_registers,
+ .n_yes_ranges = ARRAY_SIZE(gs101_pmu_registers),
+ .no_ranges = gs101_pmu_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(gs101_pmu_ro_registers),
+};
+
+const struct exynos_pmu_data gs101_pmu_data = {
+ .pmu_secure = true,
+ .pmu_cpuhp = true,
+ .rd_table = &gs101_pmu_rd_table,
+ .wr_table = &gs101_pmu_wr_table,
+};
+
+/*
+ * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
+ * from EL3, but are still read accessible. As Linux needs to write some of
+ * these registers, the following functions are provided and exposed via
+ * regmap.
+ *
+ * Note: This SMC interface is known to be implemented on gs101 and derivative
+ * SoCs.
+ */
+
+/* Write to a protected PMU register. */
+int tensor_sec_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);
+
+ /* returns -EINVAL if access isn't allowed, 0 otherwise */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/* Read/Modify/Write a protected PMU register. */
+static int tensor_sec_reg_rmw(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);
+
+ /* returns -EINVAL if access isn't allowed, 0 otherwise */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/*
+ * Read a protected PMU register. All PMU registers can be read by Linux.
+ * Note: The SMC read call is not used, as only registers that can be
+ * written are readable via SMC.
+ */
+int tensor_sec_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ *val = pmu_raw_readl(reg);
+ return 0;
+}
+
+/*
+ * For SoCs that have set/clear bit hardware, this function can be used when
+ * the PMU register will be accessed by multiple masters.
+ *
+ * For example, to set bits 13:8 in PMU reg offset 0x3e80:
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
+ *
+ * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
+ */
+static int tensor_set_bits_atomic(void *context, unsigned int offset, u32 val,
+ u32 mask)
+{
+ int ret;
+ unsigned int i;
+
+ for (i = 0; i < 32; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ offset &= ~TENSOR_SET_BITS;
+
+ if (val & BIT(i))
+ offset |= TENSOR_SET_BITS;
+ else
+ offset |= TENSOR_CLR_BITS;
+
+ ret = tensor_sec_reg_write(context, offset, i);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
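To make the encoding concrete, here is a stand-alone expansion of the second example from the comment above; the SMC is replaced by a printf and nothing beyond the loop logic is implied:

/* Mirrors tensor_set_bits_atomic(): for each bit selected by the mask,
 * bits 15:14 of the register offset choose set vs. clear and the value
 * passed to the SMC write is the bit index. */
#include <stdio.h>

#define SET_BITS	(3u << 14)	/* TENSOR_SET_BITS */
#define CLR_BITS	(1u << 15)	/* TENSOR_CLR_BITS */

static void set_bits_atomic(unsigned int offset, unsigned int val,
			    unsigned int mask)
{
	for (unsigned int i = 0; i < 32; i++) {
		unsigned int op;

		if (!(mask & (1u << i)))
			continue;
		op = offset & ~SET_BITS;
		op |= (val & (1u << i)) ? SET_BITS : CLR_BITS;
		printf("SMC write: offset 0x%05x, bit %u\n", op, i);
	}
}

int main(void)
{
	/* "set bit 8, clear bits 13:9 of PMU reg 0x3e80" */
	set_bits_atomic(0x3e80, 0x100, 0x3f00);
	return 0;
}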
+
+static bool tensor_is_atomic(unsigned int reg)
+{
+ /*
+ * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
+ * as the target registers can be accessed by multiple masters. SFRs
+ * that don't support atomic access are listed in the switch statement below.
+ */
+ if (reg > PMUALIVE_MASK)
+ return false;
+
+ switch (reg) {
+ case GS101_SYSIP_DAT(0):
+ case GS101_SYSTEM_CONFIGURATION:
+ return false;
+ default:
+ return true;
+ }
+}
+
+int tensor_sec_update_bits(void *context, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ if (!tensor_is_atomic(reg))
+ return tensor_sec_reg_rmw(context, reg, mask, val);
+
+ return tensor_set_bits_atomic(context, reg, val, mask);
+}
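Tying the pieces together: on gs101 the regmap created in exynos_pmu_probe() uses regmap_smccfg, so callers never see the SMC plumbing. A hedged caller sketch follows; the register and bit are examples only, and GS101_SYSIP_DAT(0) is chosen because tensor_is_atomic() routes it to the SMC read-modify-write path:

/* Hypothetical caller: a plain regmap_update_bits() on the PMU regmap ends
 * up in tensor_sec_update_bits() above, which picks either the atomic
 * set/clear encoding or the SMC read-modify-write. */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-pmu.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>

static int gs101_pmu_poke_example(void)
{
	struct regmap *pmureg = exynos_get_pmu_regmap();

	if (IS_ERR(pmureg))
		return PTR_ERR(pmureg);

	/* special-cased as non-atomic, so this takes the SMC RMW path */
	return regmap_update_bits(pmureg, GS101_SYSIP_DAT(0), BIT(0), BIT(0));
}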
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
deleted file mode 100644
index e86870be34c9..000000000000
--- a/drivers/soc/sifive/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-if SOC_SIFIVE || SOC_STARFIVE
-
-config SIFIVE_CCACHE
- bool "Sifive Composable Cache controller"
- help
- Support for the composable cache controller on SiFive platforms.
-
-endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
deleted file mode 100644
index 1f5dc339bf82..000000000000
--- a/drivers/soc/sifive/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c
deleted file mode 100644
index 3684f5b40a80..000000000000
--- a/drivers/soc/sifive/sifive_ccache.c
+++ /dev/null
@@ -1,272 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SiFive composable cache controller Driver
- *
- * Copyright (C) 2018-2022 SiFive, Inc.
- *
- */
-
-#define pr_fmt(fmt) "CCACHE: " fmt
-
-#include <linux/debugfs.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/device.h>
-#include <linux/bitfield.h>
-#include <asm/cacheinfo.h>
-#include <soc/sifive/sifive_ccache.h>
-
-#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100
-#define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104
-#define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108
-
-#define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120
-#define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124
-#define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128
-
-#define SIFIVE_CCACHE_DATECCFIX_LOW 0x140
-#define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144
-#define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148
-
-#define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160
-#define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164
-#define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168
-
-#define SIFIVE_CCACHE_CONFIG 0x00
-#define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0)
-#define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8)
-#define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16)
-#define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24)
-
-#define SIFIVE_CCACHE_WAYENABLE 0x08
-#define SIFIVE_CCACHE_ECCINJECTERR 0x40
-
-#define SIFIVE_CCACHE_MAX_ECCINTR 4
-
-static void __iomem *ccache_base;
-static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR];
-static struct riscv_cacheinfo_ops ccache_cache_ops;
-static int level;
-
-enum {
- DIR_CORR = 0,
- DATA_CORR,
- DATA_UNCORR,
- DIR_UNCORR,
-};
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *sifive_test;
-
-static ssize_t ccache_write(struct file *file, const char __user *data,
- size_t count, loff_t *ppos)
-{
- unsigned int val;
-
- if (kstrtouint_from_user(data, count, 0, &val))
- return -EINVAL;
- if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
- writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR);
- else
- return -EINVAL;
- return count;
-}
-
-static const struct file_operations ccache_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = ccache_write
-};
-
-static void setup_sifive_debug(void)
-{
- sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL);
-
- debugfs_create_file("sifive_debug_inject_error", 0200,
- sifive_test, NULL, &ccache_fops);
-}
-#endif
-
-static void ccache_config_read(void)
-{
- u32 cfg;
-
- cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG);
- pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n",
- FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg),
- FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg),
- BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)),
- BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg)));
-
- cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE);
- pr_info("Index of the largest way enabled: %u\n", cfg);
-}
-
-static const struct of_device_id sifive_ccache_ids[] = {
- { .compatible = "sifive,fu540-c000-ccache" },
- { .compatible = "sifive,fu740-c000-ccache" },
- { .compatible = "sifive,ccache0" },
- { /* end of table */ }
-};
-
-static ATOMIC_NOTIFIER_HEAD(ccache_err_chain);
-
-int register_sifive_ccache_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&ccache_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier);
-
-int unregister_sifive_ccache_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&ccache_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier);
-
-static int ccache_largest_wayenabled(void)
-{
- return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF;
-}
-
-static ssize_t number_of_ways_enabled_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", ccache_largest_wayenabled());
-}
-
-static DEVICE_ATTR_RO(number_of_ways_enabled);
-
-static struct attribute *priv_attrs[] = {
- &dev_attr_number_of_ways_enabled.attr,
- NULL,
-};
-
-static const struct attribute_group priv_attr_group = {
- .attrs = priv_attrs,
-};
-
-static const struct attribute_group *ccache_get_priv_group(struct cacheinfo
- *this_leaf)
-{
- /* We want to use private group for composable cache only */
- if (this_leaf->level == level)
- return &priv_attr_group;
- else
- return NULL;
-}
-
-static irqreturn_t ccache_int_handler(int irq, void *device)
-{
- unsigned int add_h, add_l;
-
- if (irq == g_irq[DIR_CORR]) {
- add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH);
- add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW);
- pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DirError interrupt sig */
- readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT);
- atomic_notifier_call_chain(&ccache_err_chain,
- SIFIVE_CCACHE_ERR_TYPE_CE,
- "DirECCFix");
- }
- if (irq == g_irq[DIR_UNCORR]) {
- add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH);
- add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW);
- /* Reading this register clears the DirFail interrupt sig */
- readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT);
- atomic_notifier_call_chain(&ccache_err_chain,
- SIFIVE_CCACHE_ERR_TYPE_UE,
- "DirECCFail");
- panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
- }
- if (irq == g_irq[DATA_CORR]) {
- add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH);
- add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW);
- pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataError interrupt sig */
- readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT);
- atomic_notifier_call_chain(&ccache_err_chain,
- SIFIVE_CCACHE_ERR_TYPE_CE,
- "DatECCFix");
- }
- if (irq == g_irq[DATA_UNCORR]) {
- add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH);
- add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW);
- pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataFail interrupt sig */
- readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT);
- atomic_notifier_call_chain(&ccache_err_chain,
- SIFIVE_CCACHE_ERR_TYPE_UE,
- "DatECCFail");
- }
-
- return IRQ_HANDLED;
-}
-
-static int __init sifive_ccache_init(void)
-{
- struct device_node *np;
- struct resource res;
- int i, rc, intr_num;
-
- np = of_find_matching_node(NULL, sifive_ccache_ids);
- if (!np)
- return -ENODEV;
-
- if (of_address_to_resource(np, 0, &res)) {
- rc = -ENODEV;
- goto err_node_put;
- }
-
- ccache_base = ioremap(res.start, resource_size(&res));
- if (!ccache_base) {
- rc = -ENOMEM;
- goto err_node_put;
- }
-
- if (of_property_read_u32(np, "cache-level", &level)) {
- rc = -ENOENT;
- goto err_unmap;
- }
-
- intr_num = of_property_count_u32_elems(np, "interrupts");
- if (!intr_num) {
- pr_err("No interrupts property\n");
- rc = -ENODEV;
- goto err_unmap;
- }
-
- for (i = 0; i < intr_num; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
- rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
- NULL);
- if (rc) {
- pr_err("Could not request IRQ %d\n", g_irq[i]);
- goto err_free_irq;
- }
- }
- of_node_put(np);
-
- ccache_config_read();
-
- ccache_cache_ops.get_priv_group = ccache_get_priv_group;
- riscv_set_cacheinfo_ops(&ccache_cache_ops);
-
-#ifdef CONFIG_DEBUG_FS
- setup_sifive_debug();
-#endif
- return 0;
-
-err_free_irq:
- while (--i >= 0)
- free_irq(g_irq[i], NULL);
-err_unmap:
- iounmap(ccache_base);
-err_node_put:
- of_node_put(np);
- return rc;
-}
-
-device_initcall(sifive_ccache_init);
diff --git a/drivers/soc/sophgo/Kconfig b/drivers/soc/sophgo/Kconfig
new file mode 100644
index 000000000000..45f78b270c91
--- /dev/null
+++ b/drivers/soc/sophgo/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Sophgo SoC drivers
+#
+
+if ARCH_SOPHGO || COMPILE_TEST
+menu "Sophgo SoC drivers"
+
+config SOPHGO_CV1800_RTCSYS
+ tristate "Sophgo CV1800 RTC MFD"
+ select MFD_CORE
+ help
+	  If you say yes here you get support for the RTC MFD driver for the
+	  Sophgo CV1800 series SoCs. The RTC module comprises a 32kHz
+	  oscillator, a Power-on-Reset (PoR) sub-module and a HW state machine
+	  that controls chip power-on, power-off and reset. Furthermore, the
+	  8051 subsystem, including its associated SRAM block, is located
+	  within RTCSYS.
+
+ This driver can also be built as a module. If so, the module will be
+ called cv1800-rtcsys.
+
+config SOPHGO_SG2044_TOPSYS
+ tristate "Sophgo SG2044 TOP syscon driver"
+ select MFD_CORE
+ help
+ This is the core driver for the Sophgo SG2044 TOP system
+	  controller device. This driver provides the PLL clock device
+	  for the SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called sg2044-topsys.
+
+endmenu
+endif
diff --git a/drivers/soc/sophgo/Makefile b/drivers/soc/sophgo/Makefile
new file mode 100644
index 000000000000..27f68df22c4d
--- /dev/null
+++ b/drivers/soc/sophgo/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SOPHGO_CV1800_RTCSYS) += cv1800-rtcsys.o
+obj-$(CONFIG_SOPHGO_SG2044_TOPSYS) += sg2044-topsys.o
diff --git a/drivers/soc/sophgo/cv1800-rtcsys.c b/drivers/soc/sophgo/cv1800-rtcsys.c
new file mode 100644
index 000000000000..fdae2e2a61c5
--- /dev/null
+++ b/drivers/soc/sophgo/cv1800-rtcsys.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Sophgo CV1800 series SoC RTC subsystem
+ *
+ * The RTC module comprises a 32kHz oscillator, a Power-on-Reset (PoR)
+ * sub-module and a HW state machine that controls chip power-on, power-off
+ * and reset. Furthermore, the 8051 subsystem, including its associated SRAM
+ * block, is located within RTCSYS.
+ *
+ * Copyright (C) 2025 Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ *
+ */
+
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+static struct resource cv1800_rtcsys_irq_resources[] = {
+ DEFINE_RES_IRQ_NAMED(0, "alarm"),
+};
+
+static const struct mfd_cell cv1800_rtcsys_subdev[] = {
+ {
+ .name = "cv1800b-rtc",
+ .num_resources = 1,
+ .resources = &cv1800_rtcsys_irq_resources[0],
+ },
+};
+
+static int cv1800_rtcsys_probe(struct platform_device *pdev)
+{
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "alarm");
+ if (irq < 0)
+ return irq;
+ cv1800_rtcsys_irq_resources[0].start = irq;
+ cv1800_rtcsys_irq_resources[0].end = irq;
+
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ cv1800_rtcsys_subdev,
+ ARRAY_SIZE(cv1800_rtcsys_subdev),
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id cv1800_rtcsys_of_match[] = {
+ { .compatible = "sophgo,cv1800b-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cv1800_rtcsys_of_match);
+
+static struct platform_driver cv1800_rtcsys_mfd = {
+ .probe = cv1800_rtcsys_probe,
+ .driver = {
+ .name = "cv1800_rtcsys",
+ .of_match_table = cv1800_rtcsys_of_match,
+ },
+};
+module_platform_driver(cv1800_rtcsys_mfd);
+
+MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800 series SoC RTC subsystem driver");
+MODULE_LICENSE("GPL");
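For illustration, a minimal sketch (not part of the patch above) of a child platform driver that would consume the "alarm" interrupt forwarded through the MFD cell registered by cv1800_rtcsys_probe(); the driver name matches the cell name "cv1800b-rtc" from the patch, while the handler body and other symbol names are hypothetical:

/* Hypothetical child driver sketch -- not part of the patch above. */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static irqreturn_t cv1800b_rtc_alarm_irq(int irq, void *data)
{
	/* Alarm handling would go here. */
	return IRQ_HANDLED;
}

static int cv1800b_rtc_probe(struct platform_device *pdev)
{
	int irq;

	/* The MFD core copied the parent's "alarm" IRQ into this device. */
	irq = platform_get_irq_byname(pdev, "alarm");
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, cv1800b_rtc_alarm_irq, 0,
				dev_name(&pdev->dev), NULL);
}

static struct platform_driver cv1800b_rtc_driver = {
	.probe = cv1800b_rtc_probe,
	.driver = {
		.name = "cv1800b-rtc",	/* matches the MFD cell name */
	},
};
module_platform_driver(cv1800b_rtc_driver);

MODULE_DESCRIPTION("Example consumer of the cv1800b-rtc MFD cell");
MODULE_LICENSE("GPL");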
diff --git a/drivers/soc/sophgo/sg2044-topsys.c b/drivers/soc/sophgo/sg2044-topsys.c
new file mode 100644
index 000000000000..179f2620b2a9
--- /dev/null
+++ b/drivers/soc/sophgo/sg2044-topsys.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 multi-function system controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/mfd/core.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/resource.h>
+
+static const struct mfd_cell sg2044_topsys_subdev[] = {
+ {
+ .name = "sg2044-pll",
+ },
+};
+
+static int sg2044_topsys_probe(struct platform_device *pdev)
+{
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ sg2044_topsys_subdev,
+ ARRAY_SIZE(sg2044_topsys_subdev),
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id sg2044_topsys_of_match[] = {
+ { .compatible = "sophgo,sg2044-top-syscon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_topsys_of_match);
+
+static struct platform_driver sg2044_topsys_driver = {
+ .probe = sg2044_topsys_probe,
+ .driver = {
+ .name = "sg2044-topsys",
+ .of_match_table = sg2044_topsys_of_match,
+ },
+};
+module_platform_driver(sg2044_topsys_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 multi-function system controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/starfive/Kconfig b/drivers/soc/starfive/Kconfig
deleted file mode 100644
index bdb96dc4c989..000000000000
--- a/drivers/soc/starfive/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config JH71XX_PMU
- bool "Support PMU for StarFive JH71XX Soc"
- depends on PM
- depends on SOC_STARFIVE || COMPILE_TEST
- default SOC_STARFIVE
- select PM_GENERIC_DOMAINS
- help
- Say 'y' here to enable support power domain support.
- In order to meet low power requirements, a Power Management Unit (PMU)
- is designed for controlling power resources in StarFive JH71XX SoCs.
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index c5070914fc6a..8aecbc9b1976 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -19,12 +19,3 @@ config SUNXI_SRAM
Say y here to enable the SRAM controller support. This
device is responsible on mapping the SRAM in the sunXi SoCs
whether to the CPU/DMA, or to the devices.
-
-config SUN20I_PPU
- bool "Allwinner D1 PPU power domain driver"
- depends on ARCH_SUNXI || COMPILE_TEST
- depends on PM
- select PM_GENERIC_DOMAINS
- help
- Say y to enable the PPU power domain driver. This saves power
- when certain peripherals, such as the video engine, are idle.
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 4458b2e0562b..446b9fc1f175 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -12,6 +12,7 @@
#include <linux/debugfs.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -33,7 +34,6 @@ struct sunxi_sram_data {
u8 offset;
u8 width;
struct sunxi_sram_func *func;
- struct list_head list;
};
struct sunxi_sram_desc {
@@ -103,7 +103,6 @@ static const struct of_device_id sunxi_sram_dt_ids[] = {
};
static struct device *sram_dev;
-static LIST_HEAD(claimed_sram);
static DEFINE_SPINLOCK(sram_lock);
static void __iomem *base;
@@ -287,6 +286,7 @@ EXPORT_SYMBOL(sunxi_sram_release);
struct sunxi_sramc_variant {
int num_emac_clocks;
bool has_ldo_ctrl;
+ bool has_ths_offset;
};
static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
@@ -308,8 +308,14 @@ static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
.num_emac_clocks = 2,
+ .has_ths_offset = true,
};
+static const struct sunxi_sramc_variant sun55i_a523_sramc_variant = {
+ .num_emac_clocks = 2,
+};
+
+#define SUNXI_SRAM_THS_OFFSET_REG 0x0
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
#define SUNXI_SYS_LDO_CTRL_REG 0x150
@@ -318,6 +324,8 @@ static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
{
const struct sunxi_sramc_variant *variant = dev_get_drvdata(dev);
+ if (reg == SUNXI_SRAM_THS_OFFSET_REG && variant->has_ths_offset)
+ return true;
if (reg >= SUNXI_SRAM_EMAC_CLOCK_REG &&
reg < SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
return true;
@@ -327,7 +335,21 @@ static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
return false;
}
-static struct regmap_config sunxi_sram_regmap_config = {
+static void sunxi_sram_lock(void *_lock)
+{
+ spinlock_t *lock = _lock;
+
+ spin_lock(lock);
+}
+
+static void sunxi_sram_unlock(void *_lock)
+{
+ spinlock_t *lock = _lock;
+
+ spin_unlock(lock);
+}
+
+static const struct regmap_config sunxi_sram_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -336,6 +358,9 @@ static struct regmap_config sunxi_sram_regmap_config = {
/* other devices have no business accessing other registers */
.readable_reg = sunxi_sram_regmap_accessible_reg,
.writeable_reg = sunxi_sram_regmap_accessible_reg,
+ .lock = sunxi_sram_lock,
+ .unlock = sunxi_sram_unlock,
+ .lock_arg = &sram_lock,
};
static int __init sunxi_sram_probe(struct platform_device *pdev)
@@ -343,6 +368,7 @@ static int __init sunxi_sram_probe(struct platform_device *pdev)
const struct sunxi_sramc_variant *variant;
struct device *dev = &pdev->dev;
struct regmap *regmap;
+ int ret;
sram_dev = &pdev->dev;
@@ -360,6 +386,10 @@ static int __init sunxi_sram_probe(struct platform_device *pdev)
regmap = devm_regmap_init_mmio(dev, base, &sunxi_sram_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+
+ ret = of_syscon_register_regmap(dev->of_node, regmap);
+ if (ret)
+ return ret;
}
of_platform_populate(dev->of_node, NULL, NULL, dev);
@@ -410,6 +440,10 @@ static const struct of_device_id sunxi_sram_dt_match[] = {
.compatible = "allwinner,sun50i-h616-system-control",
.data = &sun50i_h616_sramc_variant,
},
+ {
+ .compatible = "allwinner,sun55i-a523-system-control",
+ .data = &sun55i_a523_sramc_variant,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
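As a usage note (a sketch under assumptions, not part of this patch): once of_syscon_register_regmap() has exported the SRAM controller regmap as a syscon, a consumer such as a thermal driver could look it up by phandle and program the THS offset register, serialized with SRAM claims through the regmap lock callbacks added above. The "allwinner,sram" property name and the example_ths_set_offset() helper are illustrative assumptions.

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Register offset taken from the patch above. */
#define SUNXI_SRAM_THS_OFFSET_REG	0x0

static int example_ths_set_offset(struct device_node *np, u32 offset)
{
	struct regmap *regmap;

	/* Phandle property name is an assumption for illustration. */
	regmap = syscon_regmap_lookup_by_phandle(np, "allwinner,sram");
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/*
	 * The write only succeeds on variants with has_ths_offset set and is
	 * serialized against sunxi_sram_claim() via the shared sram_lock.
	 */
	return regmap_write(regmap, SUNXI_SRAM_THS_OFFSET_REG, offset);
}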
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 6f3098822969..c0fc54c3cd35 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -96,10 +96,8 @@ config ARCH_TEGRA_210_SOC
config ARCH_TEGRA_186_SOC
bool "NVIDIA Tegra186 SoC"
depends on !CPU_BIG_ENDIAN
+ select PINCTRL_TEGRA186
select MAILBOX
- select TEGRA_BPMP
- select TEGRA_HSP_MBOX
- select TEGRA_IVC
select SOC_TEGRA_PMC
help
	  Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
@@ -114,9 +112,6 @@ config ARCH_TEGRA_194_SOC
depends on !CPU_BIG_ENDIAN
select MAILBOX
select PINCTRL_TEGRA194
- select TEGRA_BPMP
- select TEGRA_HSP_MBOX
- select TEGRA_IVC
select SOC_TEGRA_PMC
help
Enable support for the NVIDIA Tegra194 SoC.
@@ -126,13 +121,23 @@ config ARCH_TEGRA_234_SOC
depends on !CPU_BIG_ENDIAN
select MAILBOX
select PINCTRL_TEGRA234
- select TEGRA_BPMP
- select TEGRA_HSP_MBOX
- select TEGRA_IVC
select SOC_TEGRA_PMC
help
Enable support for the NVIDIA Tegra234 SoC.
+config ARCH_TEGRA_241_SOC
+ bool "NVIDIA Tegra241 SoC"
+ help
+ Enable support for the NVIDIA Tegra241 SoC.
+
+config ARCH_TEGRA_264_SOC
+ bool "NVIDIA Tegra264 SoC"
+ depends on !CPU_BIG_ENDIAN
+ select MAILBOX
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra264 SoC.
+
endif
endif
@@ -152,11 +157,6 @@ config SOC_TEGRA_PMC
select PM_GENERIC_DOMAINS
select REGMAP
-config SOC_TEGRA_POWERGATE_BPMP
- def_bool y
- depends on PM_GENERIC_DOMAINS
- depends on TEGRA_BPMP
-
config SOC_TEGRA20_VOLTAGE_COUPLER
bool "Voltage scaling support for Tegra20 SoCs"
depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
diff --git a/drivers/soc/tegra/cbb/tegra-cbb.c b/drivers/soc/tegra/cbb/tegra-cbb.c
index 84ab46c9d9f5..6215c6a84fbe 100644
--- a/drivers/soc/tegra/cbb/tegra-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra-cbb.c
@@ -69,19 +69,12 @@ static int tegra_cbb_err_show(struct seq_file *file, void *data)
}
DEFINE_SHOW_ATTRIBUTE(tegra_cbb_err);
-static int tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
+static void tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
{
static struct dentry *root;
- if (!root) {
+ if (!root)
root = debugfs_create_file("tegra_cbb_err", 0444, NULL, cbb, &tegra_cbb_err_fops);
- if (IS_ERR_OR_NULL(root)) {
- pr_err("%s(): could not create debugfs node\n", __func__);
- return PTR_ERR(root);
- }
- }
-
- return 0;
}
void tegra_cbb_stall_enable(struct tegra_cbb *cbb)
@@ -148,13 +141,8 @@ int tegra_cbb_register(struct tegra_cbb *cbb)
{
int ret;
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- ret = tegra_cbb_err_debugfs_init(cbb);
- if (ret) {
- dev_err(cbb->dev, "failed to create debugfs\n");
- return ret;
- }
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_cbb_err_debugfs_init(cbb);
/* register interrupt handler for errors due to different initiators */
ret = cbb->ops->interrupt_enable(cbb);
diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c
index cf6886f362d3..ab75d50cc85c 100644
--- a/drivers/soc/tegra/cbb/tegra194-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra194-cbb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ * Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved
*
 * The driver handles errors from the Control Backbone (CBB) generated due to
* illegal accesses. When an error is reported from a NOC within CBB,
@@ -138,7 +138,7 @@ struct tegra194_cbb_userbits {
struct tegra194_cbb_noc_data {
const char *name;
bool erd_mask_inband_err;
- const char * const *master_id;
+ const char * const *initiator_id;
unsigned int max_aperture;
const struct tegra194_cbb_aperture *noc_aperture;
const char * const *routeid_initflow;
@@ -216,7 +216,7 @@ static const char * const tegra194_axi2apb_error[] = {
"CH2RFIFOF - Ch2 Request FIFO Full interrupt"
};
-static const char * const tegra194_master_id[] = {
+static const char * const tegra194_initiator_id[] = {
[0x0] = "CCPLEX",
[0x1] = "CCPLEX_DPMU",
[0x2] = "BPMP",
@@ -238,7 +238,7 @@ static const struct tegra_cbb_error tegra194_cbb_errors[] = {
{
.code = "SLV",
.source = "Target",
- .desc = "Target error detected by CBB slave"
+ .desc = "Target error detected by CBB target"
}, {
.code = "DEC",
.source = "Initiator NIU",
@@ -1774,8 +1774,8 @@ static void print_errlog5(struct seq_file *file, struct tegra194_cbb *cbb)
tegra_cbb_print_err(file, "\t AXI ID\t\t: %#x\n", userbits.axi_id);
}
- tegra_cbb_print_err(file, "\t Master ID\t\t: %s\n",
- cbb->noc->master_id[userbits.mstr_id]);
+ tegra_cbb_print_err(file, "\t Initiator ID\t\t: %s\n",
+ cbb->noc->initiator_id[userbits.mstr_id]);
tegra_cbb_print_err(file, "\t Security Group(GRPSEC): %#x\n", userbits.grpsec);
tegra_cbb_print_cache(file, userbits.axcache);
tegra_cbb_print_prot(file, userbits.axprot);
@@ -1836,15 +1836,15 @@ print_errlog1_2(struct seq_file *file, struct tegra194_cbb *cbb,
}
/*
- * Print transcation type, error code and description from ErrLog0 for all
- * errors. For NOC slave errors, all relevant error info is printed using
+ * Print transaction type, error code and description from ErrLog0 for all
+ * errors. For NOC target errors, all relevant error info is printed using
* ErrLog0 only. But additional information is printed for errors from
- * APB slaves because for them:
- * - All errors are logged as SLV(slave) errors due to APB having only single
+ * APB targets because for them:
+ * - All errors are logged as SLV(target) errors due to APB having only single
* bit pslverr to report all errors.
* - Exact cause is printed by reading DMAAPB_X_RAW_INTERRUPT_STATUS register.
* - The driver prints information showing AXI2APB bridge and exact error
+ * only if there is an error in any AXI2APB target.
+ * only if there is error in any AXI2APB target.
* - There is still no way to disambiguate a DEC error from SLV error type.
*/
static bool print_errlog0(struct seq_file *file, struct tegra194_cbb *cbb)
@@ -1884,8 +1884,8 @@ static bool print_errlog0(struct seq_file *file, struct tegra194_cbb *cbb)
/* For all SLV errors, read DMAAPB_X_RAW_INTERRUPT_STATUS
* register to get error status for all AXI2APB bridges.
* Print bridge details if a bit is set in a bridge's
- * status register due to error in a APB slave connected
- * to that bridge. For other NOC slaves, none of the status
+ * status register due to an error in an APB target connected
+ * to that bridge. For other NOC targets, none of the status
* register will be set.
*/
@@ -2118,7 +2118,7 @@ static const struct tegra_cbb_ops tegra194_cbb_ops = {
static struct tegra194_cbb_noc_data tegra194_cbb_central_noc_data = {
.name = "cbb-noc",
.erd_mask_inband_err = true,
- .master_id = tegra194_master_id,
+ .initiator_id = tegra194_initiator_id,
.noc_aperture = tegra194_cbbcentralnoc_apert_lookup,
.max_aperture = ARRAY_SIZE(tegra194_cbbcentralnoc_apert_lookup),
.routeid_initflow = tegra194_cbbcentralnoc_routeid_initflow,
@@ -2130,7 +2130,7 @@ static struct tegra194_cbb_noc_data tegra194_cbb_central_noc_data = {
static struct tegra194_cbb_noc_data tegra194_aon_noc_data = {
.name = "aon-noc",
.erd_mask_inband_err = false,
- .master_id = tegra194_master_id,
+ .initiator_id = tegra194_initiator_id,
.noc_aperture = tegra194_aonnoc_aperture_lookup,
.max_aperture = ARRAY_SIZE(tegra194_aonnoc_aperture_lookup),
.routeid_initflow = tegra194_aonnoc_routeid_initflow,
@@ -2142,7 +2142,7 @@ static struct tegra194_cbb_noc_data tegra194_aon_noc_data = {
static struct tegra194_cbb_noc_data tegra194_bpmp_noc_data = {
.name = "bpmp-noc",
.erd_mask_inband_err = false,
- .master_id = tegra194_master_id,
+ .initiator_id = tegra194_initiator_id,
.noc_aperture = tegra194_bpmpnoc_apert_lookup,
.max_aperture = ARRAY_SIZE(tegra194_bpmpnoc_apert_lookup),
.routeid_initflow = tegra194_bpmpnoc_routeid_initflow,
@@ -2154,7 +2154,7 @@ static struct tegra194_cbb_noc_data tegra194_bpmp_noc_data = {
static struct tegra194_cbb_noc_data tegra194_rce_noc_data = {
.name = "rce-noc",
.erd_mask_inband_err = false,
- .master_id = tegra194_master_id,
+ .initiator_id = tegra194_initiator_id,
.noc_aperture = tegra194_scenoc_apert_lookup,
.max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
.routeid_initflow = tegra194_scenoc_routeid_initflow,
@@ -2166,7 +2166,7 @@ static struct tegra194_cbb_noc_data tegra194_rce_noc_data = {
static struct tegra194_cbb_noc_data tegra194_sce_noc_data = {
.name = "sce-noc",
.erd_mask_inband_err = false,
- .master_id = tegra194_master_id,
+ .initiator_id = tegra194_initiator_id,
.noc_aperture = tegra194_scenoc_apert_lookup,
.max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
.routeid_initflow = tegra194_scenoc_routeid_initflow,
@@ -2293,7 +2293,7 @@ static int tegra194_cbb_probe(struct platform_device *pdev)
return tegra_cbb_register(&cbb->base);
}
-static int tegra194_cbb_remove(struct platform_device *pdev)
+static void tegra194_cbb_remove(struct platform_device *pdev)
{
struct tegra194_cbb *cbb = platform_get_drvdata(pdev);
struct tegra_cbb *noc, *tmp;
@@ -2311,8 +2311,6 @@ static int tegra194_cbb_remove(struct platform_device *pdev)
}
spin_unlock_irqrestore(&cbb_lock, flags);
-
- return 0;
}
static int __maybe_unused tegra194_cbb_resume_noirq(struct device *dev)
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
index 5cf0e8c34164..a9adbcecd47c 100644
--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ * Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved
*
 * The driver handles errors from the Control Backbone (CBB) version 2.0
 * generated due to illegal accesses. The driver prints debug information
 * about the failed transaction on receiving an interrupt from the Error Notifier.
* Error types supported by CBB2.0 are:
* UNSUPPORTED_ERR, PWRDOWN_ERR, TIMEOUT_ERR, FIREWALL_ERR, DECODE_ERR,
- * SLAVE_ERR
+ * TARGET_ERR
*/
#include <linux/acpi.h>
@@ -30,18 +30,22 @@
#define FABRIC_EN_CFG_ADDR_LOW_0 0x80
#define FABRIC_EN_CFG_ADDR_HI_0 0x84
-#define FABRIC_MN_MASTER_ERR_EN_0 0x200
-#define FABRIC_MN_MASTER_ERR_FORCE_0 0x204
-#define FABRIC_MN_MASTER_ERR_STATUS_0 0x208
-#define FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0 0x20c
+#define FABRIC_EN_CFG_TARGET_NODE_ADDR_INDEX_0_0 0x100
+#define FABRIC_EN_CFG_TARGET_NODE_ADDR_LOW_0 0x140
+#define FABRIC_EN_CFG_TARGET_NODE_ADDR_HI_0 0x144
-#define FABRIC_MN_MASTER_LOG_ERR_STATUS_0 0x300
-#define FABRIC_MN_MASTER_LOG_ADDR_LOW_0 0x304
-#define FABRIC_MN_MASTER_LOG_ADDR_HIGH_0 0x308
-#define FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0 0x30c
-#define FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0 0x310
-#define FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0 0x314
-#define FABRIC_MN_MASTER_LOG_USER_BITS0_0 0x318
+#define FABRIC_MN_INITIATOR_ERR_EN_0 0x200
+#define FABRIC_MN_INITIATOR_ERR_FORCE_0 0x204
+#define FABRIC_MN_INITIATOR_ERR_STATUS_0 0x208
+#define FABRIC_MN_INITIATOR_ERR_OVERFLOW_STATUS_0 0x20c
+
+#define FABRIC_MN_INITIATOR_LOG_ERR_STATUS_0 0x300
+#define FABRIC_MN_INITIATOR_LOG_ADDR_LOW_0 0x304
+#define FABRIC_MN_INITIATOR_LOG_ADDR_HIGH_0 0x308
+#define FABRIC_MN_INITIATOR_LOG_ATTRIBUTES0_0 0x30c
+#define FABRIC_MN_INITIATOR_LOG_ATTRIBUTES1_0 0x310
+#define FABRIC_MN_INITIATOR_LOG_ATTRIBUTES2_0 0x314
+#define FABRIC_MN_INITIATOR_LOG_USER_BITS0_0 0x318
#define AXI_SLV_TIMEOUT_STATUS_0_0 0x8
#define APB_BLOCK_TMO_STATUS_0 0xc00
@@ -53,7 +57,7 @@
#define FAB_EM_EL_FALCONSEC GENMASK(1, 0)
#define FAB_EM_EL_FABID GENMASK(20, 16)
-#define FAB_EM_EL_SLAVEID GENMASK(7, 0)
+#define FAB_EM_EL_TARGETID GENMASK(7, 0)
#define FAB_EM_EL_ACCESSID GENMASK(7, 0)
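For reference, a small illustrative decode (assumed values, not from the patch) showing how FIELD_GET() with masks mirroring FAB_EM_EL_FABID and FAB_EM_EL_TARGETID above extracts the fabric and target IDs that the error logger reports; the EXAMPLE_ names and the sample value are hypothetical:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Same bit layout as FAB_EM_EL_FABID / FAB_EM_EL_TARGETID above. */
#define EXAMPLE_FAB_EM_EL_FABID		GENMASK(20, 16)
#define EXAMPLE_FAB_EM_EL_TARGETID	GENMASK(7, 0)

static void example_decode_attr2(u32 mn_attr2)
{
	u8 fab_id = FIELD_GET(EXAMPLE_FAB_EM_EL_FABID, mn_attr2);
	u8 target_id = FIELD_GET(EXAMPLE_FAB_EM_EL_TARGETID, mn_attr2);

	/* e.g. mn_attr2 = 0x00060004 -> fab_id = 0x6, target_id = 0x4 */
	pr_info("fab_id=%#x target_id=%#x\n", fab_id, target_id);
}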
@@ -74,34 +78,79 @@
#define WEN 0x20000
enum tegra234_cbb_fabric_ids {
- CBB_FAB_ID,
- SCE_FAB_ID,
- RCE_FAB_ID,
- DCE_FAB_ID,
- AON_FAB_ID,
- PSC_FAB_ID,
- BPMP_FAB_ID,
- FSI_FAB_ID,
- MAX_FAB_ID,
+ T234_CBB_FABRIC_ID,
+ T234_SCE_FABRIC_ID,
+ T234_RCE_FABRIC_ID,
+ T234_DCE_FABRIC_ID,
+ T234_AON_FABRIC_ID,
+ T234_PSC_FABRIC_ID,
+ T234_BPMP_FABRIC_ID,
+ T234_FSI_FABRIC_ID,
+ T234_MAX_FABRIC_ID,
+};
+
+enum tegra264_cbb_fabric_ids {
+ T264_SYSTEM_CBB_FABRIC_ID,
+ T264_TOP_0_CBB_FABRIC_ID,
+ T264_VISION_CBB_FABRIC_ID,
+ T264_DISP_USB_CBB_FABRIC_ID,
+ T264_UPHY0_CBB_FABRIC_ID,
+ T264_RSVD0_FABRIC_ID,
+ T264_RSVD1_FABRIC_ID,
+ T264_RSVD2_FABRIC_ID,
+ T264_RSVD3_FABRIC_ID,
+ T264_RSVD4_FABRIC_ID,
+ T264_RSVD5_FABRIC_ID,
+ T264_AON_FABRIC_ID,
+ T264_PSC_FABRIC_ID,
+ T264_OESP_FABRIC_ID,
+ T264_APE_FABRIC_ID,
+ T264_BPMP_FABRIC_ID,
+ T264_RCE_0_FABRIC_ID,
+ T264_RCE_1_FABRIC_ID,
+ T264_RSVD6_FABRIC_ID,
+ T264_DCE_FABRIC_ID,
+ T264_FSI_FABRIC_ID,
+ T264_ISC_FABRIC_ID,
+ T264_SB_FABRIC_ID,
+ T264_ISC_CPU_FABRIC_ID,
+ T264_RSVD7_FABRIC_ID,
+};
+
+enum t254_cbb_fabric_ids {
+ T254_DCE_FABRIC_ID = 19,
+ T254_DISP_CLUSTER_FABRIC_ID = 25,
+ T254_C2C_FABRIC_ID = 26,
+ T254_GPU_FABRIC_ID = 27,
+ T254_DISP_CLUSTER_1_FABRIC_ID = 28,
+ T254_MAX_FABRIC_ID,
};
-struct tegra234_slave_lookup {
+struct tegra234_target_lookup {
const char *name;
unsigned int offset;
};
-struct tegra234_cbb_fabric {
+struct tegra234_fabric_lookup {
const char *name;
+ bool is_lookup;
+ const struct tegra234_target_lookup *target_map;
+ const int max_targets;
+};
+
+struct tegra234_cbb_fabric {
+ int fab_id;
phys_addr_t off_mask_erd;
phys_addr_t firewall_base;
unsigned int firewall_ctl;
unsigned int firewall_wr_ctl;
- const char * const *master_id;
+ const char * const *initiator_id;
unsigned int notifier_offset;
const struct tegra_cbb_error *errors;
const int max_errors;
- const struct tegra234_slave_lookup *slave_map;
- const int max_slaves;
+ const struct tegra234_fabric_lookup *fab_list;
+ const u32 err_intr_enbl;
+ const u32 err_status_clr;
};
struct tegra234_cbb {
@@ -177,7 +226,7 @@ static void tegra234_cbb_fault_enable(struct tegra_cbb *cbb)
void __iomem *addr;
addr = priv->regs + priv->fabric->notifier_offset;
- writel(0x1ff, addr + FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0);
+ writel(priv->fabric->err_intr_enbl, addr + FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0);
dsb(sy);
}
@@ -185,7 +234,9 @@ static void tegra234_cbb_error_clear(struct tegra_cbb *cbb)
{
struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
- writel(0x3f, priv->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ writel(0, priv->mon + FABRIC_MN_INITIATOR_ERR_FORCE_0);
+
+ writel(priv->fabric->err_status_clr, priv->mon + FABRIC_MN_INITIATOR_ERR_STATUS_0);
dsb(sy);
}
@@ -216,13 +267,13 @@ static u32 tegra234_cbb_get_tmo_slv(void __iomem *addr)
return timeout;
}
-static void tegra234_cbb_tmo_slv(struct seq_file *file, const char *slave, void __iomem *addr,
+static void tegra234_cbb_tmo_slv(struct seq_file *file, const char *target, void __iomem *addr,
u32 status)
{
- tegra_cbb_print_err(file, "\t %s : %#x\n", slave, status);
+ tegra_cbb_print_err(file, "\t %s : %#x\n", target, status);
}
-static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *slave,
+static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *target,
void __iomem *base)
{
unsigned int block = 0;
@@ -232,7 +283,7 @@ static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *slave,
status = tegra234_cbb_get_tmo_slv(base);
if (status)
- tegra_cbb_print_err(file, "\t %s_BLOCK_TMO_STATUS : %#x\n", slave, status);
+ tegra_cbb_print_err(file, "\t %s_BLOCK_TMO_STATUS : %#x\n", target, status);
while (status) {
if (status & BIT(0)) {
@@ -247,7 +298,7 @@ static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *slave,
if (clients != 0xffffffff)
clients &= BIT(client);
- sprintf(name, "%s_BLOCK%d_TMO", slave, block);
+ sprintf(name, "%s_BLOCK%d_TMO", target, block);
tegra234_cbb_tmo_slv(file, name, addr, clients);
}
@@ -262,22 +313,27 @@ static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *slave,
}
}
-static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234_cbb *cbb,
- u8 slave_id, u8 fab_id)
+static void tegra234_sw_lookup_target_timeout(struct seq_file *file, struct tegra234_cbb *cbb,
+ u8 target_id, u8 fab_id)
{
- const struct tegra234_slave_lookup *map = cbb->fabric->slave_map;
+ const struct tegra234_target_lookup *map = cbb->fabric->fab_list[fab_id].target_map;
void __iomem *addr;
+ if (target_id >= cbb->fabric->fab_list[fab_id].max_targets) {
+ tegra_cbb_print_err(file, "\t Invalid target_id:%d\n", target_id);
+ return;
+ }
+
/*
- * 1) Get slave node name and address mapping using slave_id.
- * 2) Check if the timed out slave node is APB or AXI.
- * 3) If AXI, then print timeout register and reset axi slave
+ * 1) Get target node name and address mapping using target_id.
+ * 2) Check if the timed out target node is APB or AXI.
+ * 3) If AXI, then print timeout register and reset axi target
* using <FABRIC>_SN_<>_SLV_TIMEOUT_STATUS_0_0 register.
* 4) If APB, then perform an additional lookup to find the client
* which timed out.
* a) Get block number from the index of set bit in
* <FABRIC>_SN_AXI2APB_<>_BLOCK_TMO_STATUS_0 register.
- * b) Get address of register repective to block number i.e.
+ * b) Get address of register respective to block number i.e.
* <FABRIC>_SN_AXI2APB_<>_BLOCK<index-set-bit>_TMO_0.
* c) Read the register in above step to get client_id which
* timed out as per the set bits.
@@ -285,12 +341,12 @@ static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234
* e) Goto step-a till all bits are set.
*/
- addr = cbb->regs + map[slave_id].offset;
+ addr = cbb->regs + map[target_id].offset;
- if (strstr(map[slave_id].name, "AXI2APB")) {
+ if (strstr(map[target_id].name, "AXI2APB")) {
addr += APB_BLOCK_TMO_STATUS_0;
- tegra234_cbb_lookup_apbslv(file, map[slave_id].name, addr);
+ tegra234_cbb_lookup_apbslv(file, map[target_id].name, addr);
} else {
char name[64];
u32 status;
@@ -299,12 +355,29 @@ static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234
status = tegra234_cbb_get_tmo_slv(addr);
if (status) {
- sprintf(name, "%s_SLV_TIMEOUT_STATUS", map[slave_id].name);
+ sprintf(name, "%s_SLV_TIMEOUT_STATUS", map[target_id].name);
tegra234_cbb_tmo_slv(file, name, addr, status);
}
}
}
+static void tegra234_hw_lookup_target_timeout(struct seq_file *file, struct tegra234_cbb *cbb,
+ u8 target_id, u8 fab_id)
+{
+ unsigned int notifier = cbb->fabric->notifier_offset;
+ u32 hi, lo;
+ u64 addr;
+
+ writel(target_id, cbb->regs + notifier + FABRIC_EN_CFG_TARGET_NODE_ADDR_INDEX_0_0);
+
+ hi = readl(cbb->regs + notifier + FABRIC_EN_CFG_TARGET_NODE_ADDR_HI_0);
+ lo = readl(cbb->regs + notifier + FABRIC_EN_CFG_TARGET_NODE_ADDR_LOW_0);
+
+ addr = (u64)hi << 32 | lo;
+
+ tegra_cbb_print_err(file, "\t Target Node Addr : %#llx\n", addr);
+}
+
static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb *cbb, u32 status,
u32 overflow)
{
@@ -349,8 +422,7 @@ static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb
static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
{
u8 cache_type, prot_type, burst_length, mstr_id, grpsec, vqc, falconsec, beat_size;
- u8 access_type, access_id, requester_socket_id, local_socket_id, slave_id, fab_id;
- char fabric_name[20];
+ u8 access_type, access_id, requester_socket_id, local_socket_id, target_id, fab_id;
bool is_numa = false;
u8 burst_type;
@@ -364,7 +436,7 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
/*
* For SOC with multiple NUMA nodes, print cross socket access
- * errors only if initiator/master_id is CCPLEX, CPMU or GPU.
+ * errors only if initiator_id is CCPLEX, CPMU or GPU.
*/
if (is_numa) {
local_socket_id = numa_node_id();
@@ -377,7 +449,7 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
}
fab_id = FIELD_GET(FAB_EM_EL_FABID, cbb->mn_attr2);
- slave_id = FIELD_GET(FAB_EM_EL_SLAVEID, cbb->mn_attr2);
+ target_id = FIELD_GET(FAB_EM_EL_TARGETID, cbb->mn_attr2);
access_id = FIELD_GET(FAB_EM_EL_ACCESSID, cbb->mn_attr1);
@@ -395,21 +467,18 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
else
tegra_cbb_print_err(file, "\t Wrong type index:%u\n", cbb->type);
- tegra_cbb_print_err(file, "\t MASTER_ID\t\t: %s\n", cbb->fabric->master_id[mstr_id]);
+ tegra_cbb_print_err(file, "\t Initiator_Id\t\t: %#x\n", mstr_id);
+ if (cbb->fabric->initiator_id)
+ tegra_cbb_print_err(file, "\t Initiator\t\t: %s\n",
+ cbb->fabric->initiator_id[mstr_id]);
+
tegra_cbb_print_err(file, "\t Address\t\t: %#llx\n", cbb->access);
tegra_cbb_print_cache(file, cache_type);
tegra_cbb_print_prot(file, prot_type);
tegra_cbb_print_err(file, "\t Access_Type\t\t: %s", (access_type) ? "Write\n" : "Read\n");
- tegra_cbb_print_err(file, "\t Access_ID\t\t: %#x", access_id);
-
- if (fab_id == PSC_FAB_ID)
- strcpy(fabric_name, "psc-fabric");
- else if (fab_id == FSI_FAB_ID)
- strcpy(fabric_name, "fsi-fabric");
- else
- strcpy(fabric_name, cbb->fabric->name);
+ tegra_cbb_print_err(file, "\t Access_ID\t\t: %#x\n", access_id);
if (is_numa) {
tegra_cbb_print_err(file, "\t Requester_Socket_Id\t: %#x\n",
@@ -420,8 +489,21 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
num_possible_nodes());
}
- tegra_cbb_print_err(file, "\t Fabric\t\t: %s\n", fabric_name);
- tegra_cbb_print_err(file, "\t Slave_Id\t\t: %#x\n", slave_id);
+ tegra_cbb_print_err(file, "\t Fabric\t\t: %s (id:%#x)\n",
+ cbb->fabric->fab_list[fab_id].name, fab_id);
+
+ if (of_machine_is_compatible("nvidia,tegra264") && fab_id == T264_UPHY0_CBB_FABRIC_ID) {
+ /*
+	 * In T264, the AON fabric ID value is incorrectly the same as the
+	 * UPHY0 fabric ID. For 'ID = 0x4', we must check the address which
+	 * caused the error to find the correct fabric that returned the error.
+ */
+ tegra_cbb_print_err(file, "\t or Fabric\t\t: %s\n",
+ cbb->fabric->fab_list[T264_AON_FABRIC_ID].name);
+ tegra_cbb_print_err(file, "\t Please use Address to determine correct fabric.\n");
+ }
+
+ tegra_cbb_print_err(file, "\t Target_Id\t\t: %#x\n", target_id);
tegra_cbb_print_err(file, "\t Burst_length\t\t: %#x\n", burst_length);
tegra_cbb_print_err(file, "\t Burst_type\t\t: %#x\n", burst_type);
tegra_cbb_print_err(file, "\t Beat_size\t\t: %#x\n", beat_size);
@@ -429,27 +511,30 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
tegra_cbb_print_err(file, "\t GRPSEC\t\t: %#x\n", grpsec);
tegra_cbb_print_err(file, "\t FALCONSEC\t\t: %#x\n", falconsec);
- if ((fab_id == PSC_FAB_ID) || (fab_id == FSI_FAB_ID))
+ if (!cbb->fabric->fab_list[fab_id].is_lookup)
return;
- if (slave_id >= cbb->fabric->max_slaves) {
- tegra_cbb_print_err(file, "\t Invalid slave_id:%d\n", slave_id);
- return;
- }
-
+ /*
+	 * If the is_lookup field is set in the fabric_lookup table of the SoC
+	 * data, address lookup of the target is supported for timeout errors.
+	 * If is_lookup is set but the target_map is not populated (leaving
+	 * max_targets at zero), a HW lookup is performed instead.
+ */
if (!strcmp(cbb->fabric->errors[cbb->type].code, "TIMEOUT_ERR")) {
- tegra234_lookup_slave_timeout(file, cbb, slave_id, fab_id);
- return;
+ if (cbb->fabric->fab_list[fab_id].max_targets)
+ tegra234_sw_lookup_target_timeout(file, cbb, target_id, fab_id);
+ else
+ tegra234_hw_lookup_target_timeout(file, cbb, target_id, fab_id);
}
- tegra_cbb_print_err(file, "\t Slave\t\t\t: %s\n", cbb->fabric->slave_map[slave_id].name);
+ return;
}
static int print_errmonX_info(struct seq_file *file, struct tegra234_cbb *cbb)
{
u32 overflow, status, error;
- status = readl(cbb->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ status = readl(cbb->mon + FABRIC_MN_INITIATOR_ERR_STATUS_0);
if (!status) {
pr_err("Error Notifier received a spurious notification\n");
return -ENODATA;
@@ -460,11 +545,11 @@ static int print_errmonX_info(struct seq_file *file, struct tegra234_cbb *cbb)
return -EINVAL;
}
- overflow = readl(cbb->mon + FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0);
+ overflow = readl(cbb->mon + FABRIC_MN_INITIATOR_ERR_OVERFLOW_STATUS_0);
tegra234_cbb_print_error(file, cbb, status, overflow);
- error = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ERR_STATUS_0);
+ error = readl(cbb->mon + FABRIC_MN_INITIATOR_LOG_ERR_STATUS_0);
if (!error) {
pr_info("Error Monitor doesn't have Error Logger\n");
return -EINVAL;
@@ -476,15 +561,15 @@ static int print_errmonX_info(struct seq_file *file, struct tegra234_cbb *cbb)
if (error & BIT(0)) {
u32 hi, lo;
- hi = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_HIGH_0);
- lo = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_LOW_0);
+ hi = readl(cbb->mon + FABRIC_MN_INITIATOR_LOG_ADDR_HIGH_0);
+ lo = readl(cbb->mon + FABRIC_MN_INITIATOR_LOG_ADDR_LOW_0);
cbb->access = (u64)hi << 32 | lo;
- cbb->mn_attr0 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0);
- cbb->mn_attr1 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0);
- cbb->mn_attr2 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0);
- cbb->mn_user_bits = readl(cbb->mon + FABRIC_MN_MASTER_LOG_USER_BITS0_0);
+ cbb->mn_attr0 = readl(cbb->mon + FABRIC_MN_INITIATOR_LOG_ATTRIBUTES0_0);
+ cbb->mn_attr1 = readl(cbb->mon + FABRIC_MN_INITIATOR_LOG_ATTRIBUTES1_0);
+ cbb->mn_attr2 = readl(cbb->mon + FABRIC_MN_INITIATOR_LOG_ATTRIBUTES2_0);
+ cbb->mn_user_bits = readl(cbb->mon + FABRIC_MN_INITIATOR_LOG_USER_BITS0_0);
print_errlog_err(file, cbb);
}
@@ -503,7 +588,7 @@ static int print_err_notifier(struct seq_file *file, struct tegra234_cbb *cbb, u
pr_crit("**************************************\n");
pr_crit("CPU:%d, Error:%s, Errmon:%d\n", smp_processor_id(),
- cbb->fabric->name, status);
+ cbb->fabric->fab_list[cbb->fabric->fab_id].name, status);
while (status) {
if (status & BIT(0)) {
@@ -526,13 +611,13 @@ static int print_err_notifier(struct seq_file *file, struct tegra234_cbb *cbb, u
tegra234_cbb_error_clear(&cbb->base);
if (err)
return err;
+ tegra_cbb_print_err(file, "\t**************************************\n");
}
status >>= 1;
index++;
}
- tegra_cbb_print_err(file, "\t**************************************\n");
return 0;
}
@@ -581,7 +666,8 @@ static irqreturn_t tegra234_cbb_isr(int irq, void *data)
if (status && (irq == priv->sec_irq)) {
tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@0x%llx, irq=%d\n",
- smp_processor_id(), priv->fabric->name,
+ smp_processor_id(),
+ priv->fabric->fab_list[priv->fabric->fab_id].name,
priv->res->start, irq);
err = print_err_notifier(NULL, priv, status);
@@ -589,7 +675,7 @@ static irqreturn_t tegra234_cbb_isr(int irq, void *data)
goto unlock;
/*
- * If illegal request is from CCPLEX(id:0x1) master then call WARN()
+ * If illegal request is from CCPLEX(id:0x1) initiator then call WARN()
*/
if (priv->fabric->off_mask_erd) {
mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
@@ -641,7 +727,7 @@ static const struct tegra_cbb_ops tegra234_cbb_ops = {
#endif
};
-static const char * const tegra234_master_id[] = {
+static const char * const tegra234_initiator_id[] = {
[0x00] = "TZ",
[0x01] = "CCPLEX",
[0x02] = "CCPMU",
@@ -672,8 +758,8 @@ static const char * const tegra234_master_id[] = {
static const struct tegra_cbb_error tegra234_cbb_errors[] = {
{
- .code = "SLAVE_ERR",
- .desc = "Slave being accessed responded with an error"
+ .code = "TARGET_ERR",
+ .desc = "Target being accessed responded with an error"
}, {
.code = "DECODE_ERR",
.desc = "Attempt to access an address hole"
@@ -682,37 +768,24 @@ static const struct tegra_cbb_error tegra234_cbb_errors[] = {
.desc = "Attempt to access a region which is firewall protected"
}, {
.code = "TIMEOUT_ERR",
- .desc = "No response returned by slave"
+ .desc = "No response returned by target"
}, {
.code = "PWRDOWN_ERR",
.desc = "Attempt to access a portion of fabric that is powered down"
}, {
.code = "UNSUPPORTED_ERR",
- .desc = "Attempt to access a slave through an unsupported access"
+ .desc = "Attempt to access a target through an unsupported access"
}
};
-static const struct tegra234_slave_lookup tegra234_aon_slave_map[] = {
+static const struct tegra234_target_lookup tegra234_aon_target_map[] = {
{ "AXI2APB", 0x00000 },
{ "AST", 0x14000 },
{ "CBB", 0x15000 },
{ "CPU", 0x16000 },
};
-static const struct tegra234_cbb_fabric tegra234_aon_fabric = {
- .name = "aon-fabric",
- .master_id = tegra234_master_id,
- .slave_map = tegra234_aon_slave_map,
- .max_slaves = ARRAY_SIZE(tegra234_aon_slave_map),
- .errors = tegra234_cbb_errors,
- .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
- .notifier_offset = 0x17000,
- .firewall_base = 0x30000,
- .firewall_ctl = 0x8d0,
- .firewall_wr_ctl = 0x8c8,
-};
-
-static const struct tegra234_slave_lookup tegra234_bpmp_slave_map[] = {
+static const struct tegra234_target_lookup tegra234_bpmp_target_map[] = {
{ "AXI2APB", 0x00000 },
{ "AST0", 0x15000 },
{ "AST1", 0x16000 },
@@ -720,20 +793,16 @@ static const struct tegra234_slave_lookup tegra234_bpmp_slave_map[] = {
{ "CPU", 0x18000 },
};
-static const struct tegra234_cbb_fabric tegra234_bpmp_fabric = {
- .name = "bpmp-fabric",
- .master_id = tegra234_master_id,
- .slave_map = tegra234_bpmp_slave_map,
- .max_slaves = ARRAY_SIZE(tegra234_bpmp_slave_map),
- .errors = tegra234_cbb_errors,
- .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
- .notifier_offset = 0x19000,
- .firewall_base = 0x30000,
- .firewall_ctl = 0x8f0,
- .firewall_wr_ctl = 0x8e8,
+static const struct tegra234_target_lookup tegra234_common_target_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CBB", 0x17000 },
+ { "RSVD", 0x00000 },
+ { "CPU", 0x18000 },
};
-static const struct tegra234_slave_lookup tegra234_cbb_slave_map[] = {
+static const struct tegra234_target_lookup tegra234_cbb_target_map[] = {
{ "AON", 0x40000 },
{ "BPMP", 0x41000 },
{ "CBB", 0x42000 },
@@ -797,13 +866,65 @@ static const struct tegra234_slave_lookup tegra234_cbb_slave_map[] = {
{ "AXI2APB_3", 0x91000 },
};
+static const struct tegra234_fabric_lookup tegra234_cbb_fab_list[] = {
+ [T234_CBB_FABRIC_ID] = { "cbb-fabric", true,
+ tegra234_cbb_target_map,
+ ARRAY_SIZE(tegra234_cbb_target_map) },
+ [T234_SCE_FABRIC_ID] = { "sce-fabric", true,
+ tegra234_common_target_map,
+ ARRAY_SIZE(tegra234_common_target_map) },
+ [T234_RCE_FABRIC_ID] = { "rce-fabric", true,
+ tegra234_common_target_map,
+ ARRAY_SIZE(tegra234_common_target_map) },
+ [T234_DCE_FABRIC_ID] = { "dce-fabric", true,
+ tegra234_common_target_map,
+ ARRAY_SIZE(tegra234_common_target_map) },
+ [T234_AON_FABRIC_ID] = { "aon-fabric", true,
+ tegra234_aon_target_map,
+				 ARRAY_SIZE(tegra234_aon_target_map) },
+ [T234_PSC_FABRIC_ID] = { "psc-fabric" },
+ [T234_BPMP_FABRIC_ID] = { "bpmp-fabric", true,
+ tegra234_bpmp_target_map,
+ ARRAY_SIZE(tegra234_bpmp_target_map) },
+ [T234_FSI_FABRIC_ID] = { "fsi-fabric" },
+};
+
+static const struct tegra234_cbb_fabric tegra234_aon_fabric = {
+ .fab_id = T234_AON_FABRIC_ID,
+ .fab_list = tegra234_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0x7,
+ .err_status_clr = 0x3f,
+ .notifier_offset = 0x17000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x8d0,
+ .firewall_wr_ctl = 0x8c8,
+};
+
+static const struct tegra234_cbb_fabric tegra234_bpmp_fabric = {
+ .fab_id = T234_BPMP_FABRIC_ID,
+ .fab_list = tegra234_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x3f,
+ .notifier_offset = 0x19000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x8f0,
+ .firewall_wr_ctl = 0x8e8,
+};
+
static const struct tegra234_cbb_fabric tegra234_cbb_fabric = {
- .name = "cbb-fabric",
- .master_id = tegra234_master_id,
- .slave_map = tegra234_cbb_slave_map,
- .max_slaves = ARRAY_SIZE(tegra234_cbb_slave_map),
+ .fab_id = T234_CBB_FABRIC_ID,
+ .fab_list = tegra234_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
.errors = tegra234_cbb_errors,
.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0x7f,
+ .err_status_clr = 0x3f,
.notifier_offset = 0x60000,
.off_mask_erd = 0x3a004,
.firewall_base = 0x10000,
@@ -811,22 +932,14 @@ static const struct tegra234_cbb_fabric tegra234_cbb_fabric = {
.firewall_wr_ctl = 0x23e8,
};
-static const struct tegra234_slave_lookup tegra234_common_slave_map[] = {
- { "AXI2APB", 0x00000 },
- { "AST0", 0x15000 },
- { "AST1", 0x16000 },
- { "CBB", 0x17000 },
- { "RSVD", 0x00000 },
- { "CPU", 0x18000 },
-};
-
static const struct tegra234_cbb_fabric tegra234_dce_fabric = {
- .name = "dce-fabric",
- .master_id = tegra234_master_id,
- .slave_map = tegra234_common_slave_map,
- .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .fab_id = T234_DCE_FABRIC_ID,
+ .fab_list = tegra234_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
.errors = tegra234_cbb_errors,
.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x3f,
.notifier_offset = 0x19000,
.firewall_base = 0x30000,
.firewall_ctl = 0x290,
@@ -834,12 +947,13 @@ static const struct tegra234_cbb_fabric tegra234_dce_fabric = {
};
static const struct tegra234_cbb_fabric tegra234_rce_fabric = {
- .name = "rce-fabric",
- .master_id = tegra234_master_id,
- .slave_map = tegra234_common_slave_map,
- .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .fab_id = T234_RCE_FABRIC_ID,
+ .fab_list = tegra234_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
.errors = tegra234_cbb_errors,
.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x3f,
.notifier_offset = 0x19000,
.firewall_base = 0x30000,
.firewall_ctl = 0x290,
@@ -847,19 +961,20 @@ static const struct tegra234_cbb_fabric tegra234_rce_fabric = {
};
static const struct tegra234_cbb_fabric tegra234_sce_fabric = {
- .name = "sce-fabric",
- .master_id = tegra234_master_id,
- .slave_map = tegra234_common_slave_map,
- .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .fab_id = T234_SCE_FABRIC_ID,
+ .fab_list = tegra234_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
.errors = tegra234_cbb_errors,
.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x3f,
.notifier_offset = 0x19000,
.firewall_base = 0x30000,
.firewall_ctl = 0x290,
.firewall_wr_ctl = 0x288,
};
-static const char * const tegra241_master_id[] = {
+static const char * const tegra241_initiator_id[] = {
[0x0] = "TZ",
[0x1] = "CCPLEX",
[0x2] = "CCPMU",
@@ -877,22 +992,22 @@ static const char * const tegra241_master_id[] = {
};
/*
- * Possible causes for Slave and Timeout errors.
- * SLAVE_ERR:
- * Slave being accessed responded with an error. Slave could return
+ * Possible causes for Target and Timeout errors.
+ * TARGET_ERR:
+ * Target being accessed responded with an error. Target could return
 * an error for various cases:
* Unsupported access, clamp setting when power gated, register
- * level firewall(SCR), address hole within the slave, etc
+ * level firewall(SCR), address hole within the target, etc
*
* TIMEOUT_ERR:
- * No response returned by slave. Can be due to slave being clock
- * gated, under reset, powered down or slave inability to respond
- * for an internal slave issue
+ * No response returned by the target. This can be due to the target being
+ * clock gated, under reset or powered down, or to the target being unable
+ * to respond because of an internal issue.
*/
static const struct tegra_cbb_error tegra241_cbb_errors[] = {
{
- .code = "SLAVE_ERR",
- .desc = "Slave being accessed responded with an error."
+ .code = "TARGET_ERR",
+ .desc = "Target being accessed responded with an error."
}, {
.code = "DECODE_ERR",
.desc = "Attempt to access an address hole or Reserved region of memory."
@@ -901,16 +1016,16 @@ static const struct tegra_cbb_error tegra241_cbb_errors[] = {
.desc = "Attempt to access a region which is firewalled."
}, {
.code = "TIMEOUT_ERR",
- .desc = "No response returned by slave."
+ .desc = "No response returned by target."
}, {
.code = "PWRDOWN_ERR",
.desc = "Attempt to access a portion of the fabric that is powered down."
}, {
.code = "UNSUPPORTED_ERR",
- .desc = "Attempt to access a slave through an unsupported access."
+ .desc = "Attempt to access a target through an unsupported access."
}, {
.code = "POISON_ERR",
- .desc = "Slave responds with poison error to indicate error in data."
+ .desc = "Target responds with poison error to indicate error in data."
}, {
.code = "RSVD"
}, {
@@ -968,7 +1083,18 @@ static const struct tegra_cbb_error tegra241_cbb_errors[] = {
},
};
-static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+static const struct tegra234_target_lookup tegra241_bpmp_target_map[] = {
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "CBB", 0x15000 },
+ { "CPU", 0x16000 },
+ { "AXI2APB", 0x00000 },
+ { "DBB0", 0x17000 },
+ { "DBB1", 0x18000 },
+};
+
+static const struct tegra234_target_lookup tegra241_cbb_target_map[] = {
{ "RSVD", 0x00000 },
{ "PCIE_C8", 0x51000 },
{ "PCIE_C9", 0x52000 },
@@ -1030,13 +1156,20 @@ static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
{ "AXI2APB_32", 0x8F000 },
};
+static const struct tegra234_fabric_lookup tegra241_cbb_fab_list[] = {
+ [T234_CBB_FABRIC_ID] = { "cbb-fabric", true,
+ tegra241_cbb_target_map, ARRAY_SIZE(tegra241_cbb_target_map) },
+ [T234_BPMP_FABRIC_ID] = { "bpmp-fabric", true,
+				  tegra241_bpmp_target_map, ARRAY_SIZE(tegra241_bpmp_target_map) },
+};
static const struct tegra234_cbb_fabric tegra241_cbb_fabric = {
- .name = "cbb-fabric",
- .master_id = tegra241_master_id,
- .slave_map = tegra241_cbb_slave_map,
- .max_slaves = ARRAY_SIZE(tegra241_cbb_slave_map),
+ .fab_id = T234_CBB_FABRIC_ID,
+ .fab_list = tegra241_cbb_fab_list,
+ .initiator_id = tegra241_initiator_id,
.errors = tegra241_cbb_errors,
.max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x7,
+ .err_status_clr = 0x1ff007f,
.notifier_offset = 0x60000,
.off_mask_erd = 0x40004,
.firewall_base = 0x20000,
@@ -1044,30 +1177,302 @@ static const struct tegra234_cbb_fabric tegra241_cbb_fabric = {
.firewall_wr_ctl = 0x2368,
};
-static const struct tegra234_slave_lookup tegra241_bpmp_slave_map[] = {
- { "RSVD", 0x00000 },
- { "RSVD", 0x00000 },
- { "RSVD", 0x00000 },
- { "CBB", 0x15000 },
- { "CPU", 0x16000 },
- { "AXI2APB", 0x00000 },
- { "DBB0", 0x17000 },
- { "DBB1", 0x18000 },
-};
-
static const struct tegra234_cbb_fabric tegra241_bpmp_fabric = {
- .name = "bpmp-fabric",
- .master_id = tegra241_master_id,
- .slave_map = tegra241_bpmp_slave_map,
- .max_slaves = ARRAY_SIZE(tegra241_bpmp_slave_map),
+ .fab_id = T234_BPMP_FABRIC_ID,
+ .fab_list = tegra241_cbb_fab_list,
+ .initiator_id = tegra241_initiator_id,
.errors = tegra241_cbb_errors,
.max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x1ff007f,
.notifier_offset = 0x19000,
.firewall_base = 0x30000,
.firewall_ctl = 0x8f0,
.firewall_wr_ctl = 0x8e8,
};
+static const char * const tegra264_initiator_id[] = {
+ [0x0] = "TZ",
+ [0x1] = "CCPLEX",
+ [0x2] = "ISC",
+ [0x3] = "BPMP_FW",
+ [0x4] = "AON",
+ [0x5] = "MSS_SEQ",
+ [0x6] = "GPCDMA_P",
+ [0x7] = "TSECA_NONSECURE",
+ [0x8] = "TSECA_LIGHTSECURE",
+ [0x9] = "TSECA_HEAVYSECURE",
+ [0xa] = "CORESIGHT",
+ [0xb] = "APE_0",
+ [0xc] = "APE_1",
+ [0xd] = "PEATRANS",
+ [0xe] = "JTAGM_DFT",
+ [0xf] = "RCE",
+ [0x10] = "DCE",
+ [0x11] = "PSC_FW_USER",
+ [0x12] = "PSC_FW_SUPERVISOR",
+ [0x13] = "PSC_FW_MACHINE",
+ [0x14] = "PSC_BOOT",
+ [0x15] = "BPMP_BOOT",
+ [0x16] = "GPU_0",
+ [0x17] = "GPU_1",
+ [0x18] = "GPU_2",
+ [0x19] = "GPU_3",
+ [0x1a] = "GPU_4",
+ [0x1b] = "PSC_EXT_BOOT",
+ [0x1c] = "PSC_EXT_RUNTIME",
+ [0x1d] = "OESP_EXT",
+ [0x1e] = "SB_EXT",
+ [0x1f] = "FSI_SAFETY_0",
+ [0x20] = "FSI_SAFETY_1",
+ [0x21] = "FSI_SAFETY_2",
+ [0x22] = "FSI_SAFETY_3",
+ [0x23] = "FSI_CHSM",
+ [0x24] = "RCE_1",
+ [0x25] = "BPMP_OEM_FW",
+ [0x26 ... 0x3d] = "RSVD",
+ [0x3e] = "CBB_SMN",
+ [0x3f] = "CBB_RSVD"
+};
+
+static const struct tegra234_target_lookup tegra264_top0_cbb_target_map[] = {
+ { "RSVD", 0x000000 },
+ { "CBB_CENTRAL", 0xC020000 },
+ { "AXI2APB_1", 0x80000 },
+ { "AXI2APB_10", 0x81000 },
+ { "AXI2APB_11", 0x82000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "AXI2APB_14", 0x83000 },
+ { "AXI2APB_15", 0x84000 },
+ { "AXI2APB_16", 0x85000 },
+ { "AXI2APB_17", 0x86000 },
+ { "AXI2APB_2", 0x87000 },
+ { "AXI2APB_3", 0x88000 },
+ { "RSVD", 0x00000 },
+ { "AXI2APB_5", 0x8A000 },
+ { "AXI2APB_6", 0x8B000 },
+ { "AXI2APB_7", 0x8C000 },
+ { "AXI2APB_8", 0x8D000 },
+ { "AXI2APB_9", 0x8E000 },
+ { "FSI_SLAVE", 0x64000 },
+ { "DISP_USB_CBB_T", 0x65000 },
+ { "SYSTEM_CBB_T", 0x66000 },
+ { "UPHY0_CBB_T", 0x67000 },
+ { "VISION_CBB_T", 0x68000 },
+ { "CCPLEX_SLAVE", 0x69000 },
+ { "PCIE_C0", 0x6A000 },
+ { "SMN_UCF_RX_0", 0x6B000 },
+ { "SMN_UCF_RX_1", 0x6C000 },
+ { "AXI2APB_4", 0x89000 },
+};
+
+static const struct tegra234_target_lookup tegra264_sys_cbb_target_map[] = {
+ { "RSVD", 0x00000 },
+ { "AXI2APB_1", 0xE1000 },
+ { "RSVD", 0x00000 },
+ { "AON_SLAVE", 0x79000 },
+ { "APE_SLAVE", 0x73000 },
+ { "BPMP_SLAVE", 0x74000 },
+ { "OESP_SLAVE", 0x75000 },
+ { "PSC_SLAVE", 0x76000 },
+ { "SB_SLAVE", 0x7A000 },
+ { "SMN_SYSTEM_RX", 0x7B000 },
+ { "STM", 0x77000 },
+ { "RSVD", 0x00000 },
+ { "AXI2APB_3", 0xE3000 },
+ { "TOP_CBB_T", 0x7C000 },
+ { "AXI2APB_2", 0xE4000 },
+ { "AXI2APB_4", 0xE5000 },
+ { "AXI2APB_5", 0xE6000 },
+};
+
+static const struct tegra234_target_lookup tegra264_uphy0_cbb_target_map[] = {
+ [0 ... 20] = { "RSVD", 0x00000 },
+ { "AXI2APB_1", 0x71000 },
+ { "RSVD", 0x00000 },
+ { "AXI2APB_3", 0x75000 },
+ { "SMN_UPHY0_RX", 0x53000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "PCIE_C4", 0x4B000 },
+ { "AXI2APB_2", 0x74000 },
+ { "AXI2APB_4", 0x76000 },
+ { "AXI2APB_5", 0x77000 },
+ { "RSVD", 0x00000 },
+ { "AXI2APB_7", 0x79000 },
+ { "PCIE_C2", 0x56000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "PCIE_C1", 0x55000 },
+ { "RSVD", 0x00000 },
+ { "AXI2APB_10", 0x72000 },
+ { "AXI2APB_11", 0x7C000 },
+ { "AXI2APB_8", 0x7A000 },
+ { "AXI2APB_9", 0x7B000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "PCIE_C5", 0x4E000 },
+ { "PCIE_C3", 0x58000 },
+ { "RSVD", 0x00000 },
+ { "ISC_SLAVE", 0x54000 },
+ { "TOP_CBB_T", 0x57000 },
+ { "AXI2APB_12", 0x7D000 },
+ { "AXI2APB_13", 0x70000 },
+ { "AXI2APB_6", 0x7E000 },
+};
+
+static const struct tegra234_target_lookup tegra264_vision_cbb_target_map[] = {
+ [0 ... 5] = { "RSVD", 0x0 },
+ { "HOST1X", 0x45000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "AXI2APB_2", 0x71000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "SMN_VISION_RX", 0x47000 },
+ [13 ... 19] = { "RSVD", 0x0 },
+ { "RCE_0_SLAVE", 0x4B000 },
+ { "RCE_1_SLAVE", 0x4C000 },
+ { "AXI2APB_1", 0x72000 },
+ { "AXI2APB_3", 0x73000 },
+ { "TOP_CBB_T", 0x4D000 },
+};
+
+static const struct tegra234_fabric_lookup tegra264_cbb_fab_list[] = {
+ [T264_SYSTEM_CBB_FABRIC_ID] = { "system-cbb-fabric", true,
+ tegra264_sys_cbb_target_map,
+ ARRAY_SIZE(tegra264_sys_cbb_target_map) },
+ [T264_TOP_0_CBB_FABRIC_ID] = { "top0-cbb-fabric", true,
+ tegra264_top0_cbb_target_map,
+ ARRAY_SIZE(tegra264_top0_cbb_target_map) },
+ [T264_VISION_CBB_FABRIC_ID] = { "vision-cbb-fabric", true,
+ tegra264_vision_cbb_target_map,
+ ARRAY_SIZE(tegra264_vision_cbb_target_map) },
+ [T264_DISP_USB_CBB_FABRIC_ID] = { "disp-usb-cbb-fabric" },
+ [T264_UPHY0_CBB_FABRIC_ID] = { "uphy0-cbb-fabric", true,
+ tegra264_uphy0_cbb_target_map,
+ ARRAY_SIZE(tegra264_uphy0_cbb_target_map) },
+ [T264_AON_FABRIC_ID] = { "aon-fabric" },
+ [T264_PSC_FABRIC_ID] = { "psc-fabric" },
+ [T264_OESP_FABRIC_ID] = { "oesp-fabric" },
+	[T264_APE_FABRIC_ID] = { "ape-fabric" },
+ [T264_BPMP_FABRIC_ID] = { "bpmp-fabric" },
+ [T264_RCE_0_FABRIC_ID] = { "rce0-fabric" },
+ [T264_RCE_1_FABRIC_ID] = { "rce1-fabric" },
+ [T264_DCE_FABRIC_ID] = { "dce-fabric" },
+ [T264_FSI_FABRIC_ID] = { "fsi-fabric" },
+ [T264_ISC_FABRIC_ID] = { "isc-fabric" },
+ [T264_SB_FABRIC_ID] = { "sb-fabric" },
+ [T264_ISC_CPU_FABRIC_ID] = { "isc-cpu-fabric" },
+};
+
+static const struct tegra234_cbb_fabric tegra264_top0_cbb_fabric = {
+ .fab_id = T264_TOP_0_CBB_FABRIC_ID,
+ .fab_list = tegra264_cbb_fab_list,
+ .initiator_id = tegra264_initiator_id,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x7,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x90000,
+ .off_mask_erd = 0x4a004,
+ .firewall_base = 0x3c0000,
+ .firewall_ctl = 0x5b0,
+ .firewall_wr_ctl = 0x5a8,
+};
+
+static const struct tegra234_cbb_fabric tegra264_sys_cbb_fabric = {
+ .fab_id = T264_SYSTEM_CBB_FABRIC_ID,
+ .fab_list = tegra264_cbb_fab_list,
+ .initiator_id = tegra264_initiator_id,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x40000,
+ .firewall_base = 0x29c000,
+ .firewall_ctl = 0x170,
+ .firewall_wr_ctl = 0x168,
+};
+
+static const struct tegra234_cbb_fabric tegra264_uphy0_cbb_fabric = {
+ .fab_id = T264_UPHY0_CBB_FABRIC_ID,
+ .fab_list = tegra264_cbb_fab_list,
+ .initiator_id = tegra264_initiator_id,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x1,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x80000,
+ .firewall_base = 0x360000,
+ .firewall_ctl = 0x590,
+ .firewall_wr_ctl = 0x588,
+};
+
+static const struct tegra234_cbb_fabric tegra264_vision_cbb_fabric = {
+ .fab_id = T264_VISION_CBB_FABRIC_ID,
+ .fab_list = tegra264_cbb_fab_list,
+ .initiator_id = tegra264_initiator_id,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x1,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x80000,
+ .firewall_base = 0x290000,
+ .firewall_ctl = 0x5d0,
+ .firewall_wr_ctl = 0x5c8,
+};
+
+static const struct tegra234_fabric_lookup t254_cbb_fab_list[] = {
+ [T254_C2C_FABRIC_ID] = { "c2c-fabric", true },
+ [T254_DISP_CLUSTER_FABRIC_ID] = { "display-cluster-fabric", true },
+ [T254_GPU_FABRIC_ID] = { "gpu-fabric", true },
+};
+
+static const struct tegra234_cbb_fabric t254_c2c_fabric = {
+ .fab_id = T254_C2C_FABRIC_ID,
+ .fab_list = t254_cbb_fab_list,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x50000,
+ .off_mask_erd = 0x14004,
+ .firewall_base = 0x40000,
+ .firewall_ctl = 0x9b0,
+ .firewall_wr_ctl = 0x9a8,
+};
+
+static const struct tegra234_cbb_fabric t254_disp_fabric = {
+ .fab_id = T254_DISP_CLUSTER_FABRIC_ID,
+ .fab_list = t254_cbb_fab_list,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x1,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x50000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x810,
+ .firewall_wr_ctl = 0x808,
+};
+
+static const struct tegra234_cbb_fabric t254_gpu_fabric = {
+ .fab_id = T254_GPU_FABRIC_ID,
+ .fab_list = t254_cbb_fab_list,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x1f,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x50000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x930,
+ .firewall_wr_ctl = 0x928,
+};
+
static const struct of_device_id tegra234_cbb_dt_ids[] = {
{ .compatible = "nvidia,tegra234-cbb-fabric", .data = &tegra234_cbb_fabric },
{ .compatible = "nvidia,tegra234-aon-fabric", .data = &tegra234_aon_fabric },
@@ -1075,6 +1480,10 @@ static const struct of_device_id tegra234_cbb_dt_ids[] = {
{ .compatible = "nvidia,tegra234-dce-fabric", .data = &tegra234_dce_fabric },
{ .compatible = "nvidia,tegra234-rce-fabric", .data = &tegra234_rce_fabric },
{ .compatible = "nvidia,tegra234-sce-fabric", .data = &tegra234_sce_fabric },
+ { .compatible = "nvidia,tegra264-sys-cbb-fabric", .data = &tegra264_sys_cbb_fabric },
+ { .compatible = "nvidia,tegra264-top0-cbb-fabric", .data = &tegra264_top0_cbb_fabric },
+ { .compatible = "nvidia,tegra264-uphy0-cbb-fabric", .data = &tegra264_uphy0_cbb_fabric },
+ { .compatible = "nvidia,tegra264-vision-cbb-fabric", .data = &tegra264_vision_cbb_fabric },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, tegra234_cbb_dt_ids);
@@ -1088,6 +1497,9 @@ struct tegra234_cbb_acpi_uid {
static const struct tegra234_cbb_acpi_uid tegra234_cbb_acpi_uids[] = {
{ "NVDA1070", "1", &tegra241_cbb_fabric },
{ "NVDA1070", "2", &tegra241_bpmp_fabric },
+ { "NVDA1070", "3", &t254_c2c_fabric },
+ { "NVDA1070", "4", &t254_disp_fabric },
+ { "NVDA1070", "5", &t254_gpu_fabric },
{ },
};
@@ -1176,7 +1588,7 @@ static int __maybe_unused tegra234_cbb_resume_noirq(struct device *dev)
tegra234_cbb_error_enable(&cbb->base);
- dev_dbg(dev, "%s resumed\n", cbb->fabric->name);
+ dev_dbg(dev, "%s resumed\n", cbb->fabric->fab_list[cbb->fabric->fab_id].name);
return 0;
}
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index dff6d5ef4e46..d82b7670abb7 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -27,17 +27,7 @@ static const struct of_device_id tegra_machine_match[] = {
bool soc_is_tegra(void)
{
- const struct of_device_id *match;
- struct device_node *root;
-
- root = of_find_node_by_path("/");
- if (!root)
- return false;
-
- match = of_match_node(tegra_machine_match, root);
- of_node_put(root);
-
- return match != NULL;
+ return of_machine_device_match(tegra_machine_match);
}
static int tegra_core_dev_init_opp_state(struct device *dev)
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index a2c28f493a75..74d2fedea71c 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -3,11 +3,13 @@
* Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -113,6 +115,28 @@ static void tegra_fuse_restore(void *base)
fuse->clk = NULL;
}
+static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info)
+{
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
+ tegra_revision_name[tegra_sku_info->revision],
+ tegra_sku_info->sku_id, tegra_sku_info->cpu_process_id,
+ tegra_sku_info->soc_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+ tegra_sku_info->cpu_speedo_id, tegra_sku_info->soc_speedo_id);
+}
+
+static int tegra_fuse_add_lookups(struct tegra_fuse *fuse)
+{
+ fuse->lookups = kmemdup_array(fuse->soc->lookups, fuse->soc->num_lookups,
+ sizeof(*fuse->lookups), GFP_KERNEL);
+ if (!fuse->lookups)
+ return -ENOMEM;
+
+ nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
+
+ return 0;
+}
+
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
@@ -130,15 +154,44 @@ static int tegra_fuse_probe(struct platform_device *pdev)
return PTR_ERR(fuse->base);
fuse->phys = res->start;
- fuse->clk = devm_clk_get(&pdev->dev, "fuse");
- if (IS_ERR(fuse->clk)) {
- if (PTR_ERR(fuse->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
- PTR_ERR(fuse->clk));
+ /* Initialize the soc data and lookups if using ACPI boot. */
+ if (is_acpi_node(dev_fwnode(&pdev->dev)) && !fuse->soc) {
+ u8 chip;
- return PTR_ERR(fuse->clk);
+ tegra_acpi_init_apbmisc();
+
+ chip = tegra_get_chip_id();
+ switch (chip) {
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ case TEGRA194:
+ fuse->soc = &tegra194_fuse_soc;
+ break;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ case TEGRA234:
+ fuse->soc = &tegra234_fuse_soc;
+ break;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_241_SOC)
+ case TEGRA241:
+ fuse->soc = &tegra241_fuse_soc;
+ break;
+#endif
+ default:
+ return dev_err_probe(&pdev->dev, -EINVAL, "Unsupported SoC: %02x\n", chip);
+ }
+
+ fuse->soc->init(fuse);
+
+ err = tegra_fuse_add_lookups(fuse);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to add FUSE lookups\n");
}
+ fuse->clk = devm_clk_get_optional(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(fuse->clk), "failed to get FUSE clock\n");
+
platform_set_drvdata(pdev, fuse);
fuse->dev = &pdev->dev;
@@ -179,12 +232,8 @@ static int tegra_fuse_probe(struct platform_device *pdev)
}
fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse");
- if (IS_ERR(fuse->rst)) {
- err = PTR_ERR(fuse->rst);
- dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n",
- fuse->rst);
- return err;
- }
+ if (IS_ERR(fuse->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(fuse->rst), "failed to get FUSE reset\n");
/*
* FUSE clock is enabled at a boot time, hence this resume/suspend
@@ -262,10 +311,17 @@ static const struct dev_pm_ops tegra_fuse_pm = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume)
};
+static const struct acpi_device_id tegra_fuse_acpi_match[] = {
+ { "NVDA200F" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, tegra_fuse_acpi_match);
+
static struct platform_driver tegra_fuse_driver = {
.driver = {
.name = "tegra-fuse",
.of_match_table = tegra_fuse_match,
+ .acpi_match_table = tegra_fuse_acpi_match,
.pm = &tegra_fuse_pm,
.suppress_bind_attrs = true,
},
@@ -287,7 +343,16 @@ u32 __init tegra_fuse_read_early(unsigned int offset)
int tegra_fuse_readl(unsigned long offset, u32 *value)
{
- if (!fuse->read || !fuse->clk)
+ if (!fuse->dev)
+ return -EPROBE_DEFER;
+
+ /*
+ * Wait for fuse->clk to be initialized if device-tree boot is used.
+ */
+ if (is_of_node(dev_fwnode(fuse->dev)) && !fuse->clk)
+ return -EPROBE_DEFER;
+
+ if (!fuse->read)
return -EPROBE_DEFER;
if (IS_ERR(fuse->clk))
@@ -343,7 +408,8 @@ const struct attribute_group tegra_soc_attr_group = {
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC)
static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -370,7 +436,7 @@ const struct attribute_group tegra194_soc_attr_group = {
};
#endif
-struct device * __init tegra_soc_device_register(void)
+struct device *tegra_soc_device_register(void)
{
struct soc_device_attribute *attr;
struct soc_device *dev;
@@ -407,6 +473,7 @@ static int __init tegra_init_fuse(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
+ int err;
tegra_init_apbmisc();
@@ -497,22 +564,13 @@ static int __init tegra_init_fuse(void)
fuse->soc->init(fuse);
- pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
- tegra_revision_name[tegra_sku_info.revision],
- tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
- tegra_sku_info.soc_process_id);
- pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
- tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+ tegra_fuse_print_sku_info(&tegra_sku_info);
- if (fuse->soc->lookups) {
- size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups;
-
- fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL);
- if (fuse->lookups)
- nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
- }
+ err = tegra_fuse_add_lookups(fuse);
+ if (err)
+ pr_err("failed to add FUSE lookups\n");
- return 0;
+ return err;
}
early_initcall(tegra_init_fuse);
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index e94d46372a63..524fa1b0cd3d 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -38,7 +38,8 @@
defined(CONFIG_ARCH_TEGRA_210_SOC) || \
defined(CONFIG_ARCH_TEGRA_186_SOC) || \
defined(CONFIG_ARCH_TEGRA_194_SOC) || \
- defined(CONFIG_ARCH_TEGRA_234_SOC)
+ defined(CONFIG_ARCH_TEGRA_234_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_241_SOC)
static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
if (WARN_ON(!fuse->base))
@@ -116,6 +117,124 @@ const struct tegra_fuse_soc tegra30_fuse_soc = {
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct nvmem_cell_info tegra114_fuse_cells[] = {
+ {
+ .name = "tsensor-cpu1",
+ .offset = 0x084,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu2",
+ .offset = 0x088,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-common",
+ .offset = 0x08c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu0",
+ .offset = 0x098,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu3",
+ .offset = 0x12c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-gpu",
+ .offset = 0x154,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem0",
+ .offset = 0x158,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem1",
+ .offset = 0x15c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-pllx",
+ .offset = 0x160,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra114_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ },
+};
+
static const struct tegra_fuse_info tegra114_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x2a0,
@@ -126,6 +245,10 @@ const struct tegra_fuse_soc tegra114_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra114_init_speedo_data,
.info = &tegra114_fuse_info,
+ .lookups = tegra114_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra114_fuse_lookups),
+ .cells = tegra114_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra114_fuse_cells),
.soc_attr_group = &tegra_soc_attr_group,
.clk_suspend_on = false,
};
@@ -646,15 +769,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
};
static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
- { .start = 0x01c, .end = 0x0c8 },
- { .start = 0x12c, .end = 0x184 },
+ { .start = 0x01c, .end = 0x064 },
+ { .start = 0x084, .end = 0x0a0 },
+ { .start = 0x0a4, .end = 0x0c8 },
+ { .start = 0x12c, .end = 0x164 },
+ { .start = 0x16c, .end = 0x184 },
{ .start = 0x190, .end = 0x198 },
{ .start = 0x1a0, .end = 0x204 },
- { .start = 0x21c, .end = 0x250 },
- { .start = 0x25c, .end = 0x2f0 },
+ { .start = 0x21c, .end = 0x2f0 },
{ .start = 0x310, .end = 0x3d8 },
- { .start = 0x400, .end = 0x4f0 },
- { .start = 0x4f8, .end = 0x7e8 },
+ { .start = 0x400, .end = 0x420 },
+ { .start = 0x444, .end = 0x490 },
+ { .start = 0x4bc, .end = 0x4f0 },
+ { .start = 0x4f8, .end = 0x54c },
+ { .start = 0x57c, .end = 0x7e8 },
{ .start = 0x8d0, .end = 0x8d8 },
{ .start = 0xacc, .end = 0xf00 }
};
@@ -678,3 +806,23 @@ const struct tegra_fuse_soc tegra234_fuse_soc = {
.clk_suspend_on = false,
};
#endif
+
+#if defined(CONFIG_ARCH_TEGRA_241_SOC)
+static const struct tegra_fuse_info tegra241_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x16008,
+ .spare = 0xcf0,
+};
+
+static const struct nvmem_keepout tegra241_fuse_keepouts[] = {
+ { .start = 0xc, .end = 0x1600c }
+};
+
+const struct tegra_fuse_soc tegra241_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra241_fuse_info,
+ .keepouts = tegra241_fuse_keepouts,
+ .num_keepouts = ARRAY_SIZE(tegra241_fuse_keepouts),
+ .soc_attr_group = &tegra194_soc_attr_group,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 90f23be73894..9fee6ad6ad9e 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -69,6 +69,7 @@ struct tegra_fuse {
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
+void tegra_acpi_init_apbmisc(void);
u32 __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
@@ -123,7 +124,8 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc;
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC)
extern const struct attribute_group tegra194_soc_attr_group;
#endif
@@ -135,4 +137,8 @@ extern const struct tegra_fuse_soc tegra194_fuse_soc;
extern const struct tegra_fuse_soc tegra234_fuse_soc;
#endif
+#ifdef CONFIG_ARCH_TEGRA_241_SOC
+extern const struct tegra_fuse_soc tegra241_fuse_soc;
+#endif
+
#endif
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 695d0b7f9a8a..06c2bcbee573 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -65,27 +65,52 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
sku_info->gpu_speedo_id = 0;
*threshold = THRESHOLD_INDEX_0;
- switch (sku) {
- case 0x00: /* Engineering SKU */
- case 0x01: /* Engineering SKU */
- case 0x07:
- case 0x17:
- case 0x27:
- if (speedo_rev >= 2)
+ if (sku_info->revision >= TEGRA_REVISION_A02) {
+ switch (sku) {
+ case 0x00: /* Engineering SKU */
+ case 0x01: /* Engineering SKU */
+ case 0x13:
+ sku_info->cpu_speedo_id = 5;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ case 0x07:
+ case 0x17:
+ case 0x1F:
+ sku_info->cpu_speedo_id = 7;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ case 0x27:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ case 0x83:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->gpu_speedo_id = 3;
+ break;
+
+ case 0x87:
+ sku_info->cpu_speedo_id = 2;
sku_info->gpu_speedo_id = 1;
- break;
-
- case 0x13:
- if (speedo_rev >= 2)
- sku_info->gpu_speedo_id = 1;
-
- sku_info->cpu_speedo_id = 1;
- break;
-
- default:
+ break;
+
+ case 0x8F:
+ sku_info->soc_speedo_id = 2;
+ sku_info->cpu_speedo_id = 9;
+ sku_info->gpu_speedo_id = 2;
+ break;
+
+ default:
+ pr_err("Tegra210: unknown revision 2 or newer SKU %#04x\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+ } else if (sku == 0x00 || sku == 0x01 || sku == 0x07 || sku == 0x13 || sku == 0x17) {
+ sku_info->gpu_speedo_id = 1;
+ } else {
pr_err("Tegra210: unknown SKU %#04x\n", sku);
- /* Using the default for the error case */
- break;
}
}
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index da970f3dbf35..0ce94fdc536f 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -3,9 +3,11 @@
* Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -62,6 +64,7 @@ bool tegra_is_silicon(void)
switch (tegra_get_chip_id()) {
case TEGRA194:
case TEGRA234:
+ case TEGRA241:
case TEGRA264:
if (tegra_get_platform() == 0)
return true;
@@ -125,6 +128,7 @@ static const struct of_device_id apbmisc_match[] __initconst = {
{ .compatible = "nvidia,tegra186-misc", },
{ .compatible = "nvidia,tegra194-misc", },
{ .compatible = "nvidia,tegra234-misc", },
+ { .compatible = "nvidia,tegra264-misc", },
{},
};
@@ -160,9 +164,34 @@ void __init tegra_init_revision(void)
tegra_sku_info.platform = tegra_get_platform();
}
-void __init tegra_init_apbmisc(void)
+static void tegra_init_apbmisc_resources(struct resource *apbmisc,
+ struct resource *straps)
{
void __iomem *strapping_base;
+
+ apbmisc_base = ioremap(apbmisc->start, resource_size(apbmisc));
+ if (apbmisc_base)
+ chipid = readl_relaxed(apbmisc_base + 4);
+ else
+ pr_err("failed to map APBMISC registers\n");
+
+ strapping_base = ioremap(straps->start, resource_size(straps));
+ if (strapping_base) {
+ strapping = readl_relaxed(strapping_base);
+ iounmap(strapping_base);
+ } else {
+ pr_err("failed to map strapping options registers\n");
+ }
+}
+
+/**
+ * tegra_init_apbmisc - Initializes Tegra APBMISC and Strapping registers.
+ *
+ * This is called during early init as some of the old 32-bit ARM code needs
+ * information from the APBMISC registers very early during boot.
+ */
+void __init tegra_init_apbmisc(void)
+{
struct resource apbmisc, straps;
struct device_node *np;
@@ -219,23 +248,73 @@ void __init tegra_init_apbmisc(void)
}
}
- apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
- if (!apbmisc_base) {
- pr_err("failed to map APBMISC registers\n");
- } else {
- chipid = readl_relaxed(apbmisc_base + 4);
+ tegra_init_apbmisc_resources(&apbmisc, &straps);
+ long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+
+put:
+ of_node_put(np);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id apbmisc_acpi_match[] = {
+ { "NVDA2010" },
+ { /* sentinel */ }
+};
+
+void tegra_acpi_init_apbmisc(void)
+{
+ struct resource *resources[2] = { NULL };
+ struct resource_entry *rentry;
+ struct acpi_device *adev = NULL;
+ struct list_head resource_list;
+ int rcount = 0;
+ int ret;
+
+ adev = acpi_dev_get_first_match_dev(apbmisc_acpi_match[0].id, NULL, -1);
+ if (!adev)
+ return;
+
+ INIT_LIST_HEAD(&resource_list);
+
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
+ if (ret < 0) {
+ pr_err("failed to get APBMISC memory resources");
+ goto out_put_acpi_dev;
}
- strapping_base = ioremap(straps.start, resource_size(&straps));
- if (!strapping_base) {
- pr_err("failed to map strapping options registers\n");
- } else {
- strapping = readl_relaxed(strapping_base);
- iounmap(strapping_base);
+ /*
+ * Get required memory resources.
+ *
+ * resources[0]: apbmisc.
+ * resources[1]: straps.
+ */
+ resource_list_for_each_entry(rentry, &resource_list) {
+ if (rcount >= ARRAY_SIZE(resources))
+ break;
+
+ resources[rcount++] = rentry->res;
}
- long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+ if (!resources[0]) {
+ pr_err("failed to get APBMISC registers\n");
+ goto out_free_resource_list;
+ }
-put:
- of_node_put(np);
+ if (!resources[1]) {
+ pr_err("failed to get strapping options registers\n");
+ goto out_free_resource_list;
+ }
+
+ tegra_init_apbmisc_resources(resources[0], resources[1]);
+
+out_free_resource_list:
+ acpi_dev_free_resource_list(&resource_list);
+
+out_put_acpi_dev:
+ acpi_dev_put(adev);
+}
+#else
+void tegra_acpi_init_apbmisc(void)
+{
}
+#endif
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 162f52456f65..f3760a3b3026 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3,7 +3,7 @@
* drivers/soc/tegra/pmc.c
*
* Copyright (c) 2010 Google, Inc
- * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -47,6 +47,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/syscore_ops.h>
#include <soc/tegra/common.h>
@@ -384,6 +385,7 @@ struct tegra_pmc_soc {
bool has_blink_output;
bool has_usb_sleepwalk;
bool supports_core_domain;
+ bool has_single_mmio_aperture;
};
/**
@@ -416,12 +418,12 @@ struct tegra_pmc_soc {
* @irq: chip implementation for the IRQ domain
* @clk_nb: pclk clock changes handler
* @core_domain_state_synced: flag marking the core domain's state as synced
- * @core_domain_registered: flag marking the core domain as registered
* @wake_type_level_map: Bitmap indicating level type for non-dual edge wakes
* @wake_type_dual_edge_map: Bitmap indicating if a wake is dual-edge or not
* @wake_sw_status_map: Bitmap to hold raw status of wakes without mask
* @wake_cntrl_level_map: Bitmap to hold wake levels to be programmed in
* cntrl register associated with each wake during system suspend.
+ * @syscore: syscore suspend/resume callbacks
*/
struct tegra_pmc {
struct device *dev;
@@ -460,13 +462,12 @@ struct tegra_pmc {
struct notifier_block clk_nb;
bool core_domain_state_synced;
- bool core_domain_registered;
unsigned long *wake_type_level_map;
unsigned long *wake_type_dual_edge_map;
unsigned long *wake_sw_status_map;
unsigned long *wake_cntrl_level_map;
- struct syscore_ops syscore;
+ struct syscore syscore;
};
static struct tegra_pmc *pmc = &(struct tegra_pmc) {
@@ -1180,7 +1181,7 @@ static int powergate_show(struct seq_file *s, void *data)
continue;
seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
- status ? "yes" : "no");
+ str_yes_no(status));
}
return 0;
@@ -1232,7 +1233,7 @@ err:
}
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
- struct device_node *np, bool off)
+ struct device_node *np)
{
struct device *dev = pg->pmc->dev;
int err;
@@ -1247,22 +1248,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
err = reset_control_acquire(pg->reset);
if (err < 0) {
pr_err("failed to acquire resets: %d\n", err);
- goto out;
- }
-
- if (off) {
- err = reset_control_assert(pg->reset);
- } else {
- err = reset_control_deassert(pg->reset);
- if (err < 0)
- goto out;
-
- reset_control_release(pg->reset);
- }
-
-out:
- if (err) {
- reset_control_release(pg->reset);
reset_control_put(pg->reset);
}
@@ -1295,6 +1280,7 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
pg->id = id;
pg->genpd.name = np->name;
+ pg->genpd.flags = GENPD_FLAG_NO_SYNC_STATE;
pg->genpd.power_off = tegra_genpd_power_off;
pg->genpd.power_on = tegra_genpd_power_on;
pg->pmc = pmc;
@@ -1307,20 +1293,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
goto set_available;
}
- err = tegra_powergate_of_get_resets(pg, np, off);
+ err = tegra_powergate_of_get_resets(pg, np);
if (err < 0) {
dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
- if (off)
- WARN_ON(tegra_powergate_power_up(pg, true));
+ /*
+ * If the power-domain is off, then ensure the resets are asserted.
+	 * If the power-domain is on, then power it down to ensure that when it
+	 * is turned on again, the power-domain, clocks and resets are all in
+	 * the expected state.
+ */
+ if (off) {
+ err = reset_control_assert(pg->reset);
+ if (err) {
+ pr_err("failed to assert resets: %d\n", err);
+ goto remove_resets;
+ }
+ } else {
+ err = tegra_powergate_power_down(pg);
+ if (err) {
+ dev_err(dev, "failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
+ goto remove_resets;
+ }
+ }
+ /*
+	 * If PM_GENERIC_DOMAINS is not enabled, power on
+ * the domain and skip the genpd registration.
+ */
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ WARN_ON(tegra_powergate_power_up(pg, true));
goto remove_resets;
}
- err = pm_genpd_init(&pg->genpd, NULL, off);
+ err = pm_genpd_init(&pg->genpd, NULL, true);
if (err < 0) {
dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
err);
@@ -1393,13 +1402,6 @@ tegra_pmc_core_pd_set_performance_state(struct generic_pm_domain *genpd,
return 0;
}
-static unsigned int
-tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
- struct dev_pm_opp *opp)
-{
- return dev_pm_opp_get_level(opp);
-}
-
static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct generic_pm_domain *genpd;
@@ -1411,8 +1413,8 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
return -ENOMEM;
genpd->name = "core";
+ genpd->flags = GENPD_FLAG_NO_SYNC_STATE;
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
- genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
err = devm_pm_opp_set_regulators(pmc->dev, rname);
if (err)
@@ -1431,8 +1433,6 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
goto remove_genpd;
}
- pmc->core_domain_registered = true;
-
return 0;
remove_genpd:
@@ -1445,7 +1445,7 @@ static int tegra_powergate_init(struct tegra_pmc *pmc,
struct device_node *parent)
{
struct of_phandle_args child_args, parent_args;
- struct device_node *np, *child;
+ struct device_node *np;
int err = 0;
/*
@@ -1464,12 +1464,10 @@ static int tegra_powergate_init(struct tegra_pmc *pmc,
if (!np)
return 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
err = tegra_powergate_add(pmc, child);
- if (err < 0) {
- of_node_put(child);
+ if (err < 0)
break;
- }
if (of_parse_phandle_with_args(child, "power-domains",
"#power-domain-cells",
@@ -1481,10 +1479,8 @@ static int tegra_powergate_init(struct tegra_pmc *pmc,
err = of_genpd_add_subdomain(&parent_args, &child_args);
of_node_put(parent_args.np);
- if (err) {
- of_node_put(child);
+ if (err)
break;
- }
}
of_node_put(np);
@@ -1785,30 +1781,6 @@ static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
return TEGRA_IO_PAD_VOLTAGE_3V3;
}
-/**
- * tegra_io_rail_power_on() - enable power to I/O rail
- * @id: Tegra I/O pad ID for which to enable power
- *
- * See also: tegra_io_pad_power_enable()
- */
-int tegra_io_rail_power_on(unsigned int id)
-{
- return tegra_io_pad_power_enable(id);
-}
-EXPORT_SYMBOL(tegra_io_rail_power_on);
-
-/**
- * tegra_io_rail_power_off() - disable power to I/O rail
- * @id: Tegra I/O pad ID for which to disable power
- *
- * See also: tegra_io_pad_power_disable()
- */
-int tegra_io_rail_power_off(unsigned int id)
-{
- return tegra_io_pad_power_disable(id);
-}
-EXPORT_SYMBOL(tegra_io_rail_power_off);
-
#ifdef CONFIG_PM_SLEEP
enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
{
@@ -2534,8 +2506,8 @@ static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
pmc->irq.irq_set_type = pmc->soc->irq_set_type;
pmc->irq.irq_set_wake = pmc->soc->irq_set_wake;
- pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node,
- &tegra_pmc_irq_domain_ops, pmc);
+ pmc->domain = irq_domain_create_hierarchy(parent, 0, 96, dev_fwnode(pmc->dev),
+ &tegra_pmc_irq_domain_ops, pmc);
if (!pmc->domain) {
dev_err(pmc->dev, "failed to allocate domain\n");
return -ENOMEM;
@@ -2917,31 +2889,36 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake");
- if (res) {
- pmc->wake = devm_ioremap_resource(&pdev->dev, res);
+ if (pmc->soc->has_single_mmio_aperture) {
+ pmc->wake = base;
+ pmc->aotag = base;
+ pmc->scratch = base;
+ } else {
+ pmc->wake = devm_platform_ioremap_resource_byname(pdev, "wake");
if (IS_ERR(pmc->wake))
return PTR_ERR(pmc->wake);
- } else {
- pmc->wake = base;
- }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag");
- if (res) {
- pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pmc->aotag))
- return PTR_ERR(pmc->aotag);
- } else {
- pmc->aotag = base;
- }
+ /* "aotag" is an optional aperture */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "aotag");
+ if (res) {
+ pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->aotag))
+ return PTR_ERR(pmc->aotag);
+ } else {
+ pmc->aotag = NULL;
+ }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch");
- if (res) {
- pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pmc->scratch))
- return PTR_ERR(pmc->scratch);
- } else {
- pmc->scratch = base;
+ /* "scratch" is an optional aperture */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "scratch");
+ if (res) {
+ pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->scratch))
+ return PTR_ERR(pmc->scratch);
+ } else {
+ pmc->scratch = NULL;
+ }
}
pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk");
@@ -2953,12 +2930,15 @@ static int tegra_pmc_probe(struct platform_device *pdev)
* PMC should be last resort for restarting since it soft-resets
* CPU without resetting everything else.
*/
- err = devm_register_reboot_notifier(&pdev->dev,
- &tegra_pmc_reboot_notifier);
- if (err) {
- dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
- err);
- return err;
+ if (pmc->scratch) {
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to register reboot notifier, %d\n",
+ err);
+ return err;
+ }
}
err = devm_register_sys_off_handler(&pdev->dev,
@@ -3175,7 +3155,7 @@ static void tegra186_pmc_process_wake_events(struct tegra_pmc *pmc, unsigned int
}
}
-static void tegra186_pmc_wake_syscore_resume(void)
+static void tegra186_pmc_wake_syscore_resume(void *data)
{
u32 status, mask;
unsigned int i;
@@ -3188,7 +3168,7 @@ static void tegra186_pmc_wake_syscore_resume(void)
}
}
-static int tegra186_pmc_wake_syscore_suspend(void)
+static int tegra186_pmc_wake_syscore_suspend(void *data)
{
wke_read_sw_wake_status(pmc);
@@ -3207,6 +3187,11 @@ static int tegra186_pmc_wake_syscore_suspend(void)
return 0;
}
+static const struct syscore_ops tegra186_pmc_wake_syscore_ops = {
+ .suspend = tegra186_pmc_wake_syscore_suspend,
+ .resume = tegra186_pmc_wake_syscore_resume,
+};
+
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
static int tegra_pmc_suspend(struct device *dev)
{
@@ -3332,6 +3317,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra30_powergates[] = {
@@ -3393,6 +3379,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra114_powergates[] = {
@@ -3450,6 +3437,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra124_powergates[] = {
@@ -3594,6 +3582,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra210_powergates[] = {
@@ -3757,6 +3746,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const struct tegra_io_pad_soc tegra186_io_pads[] = {
@@ -3852,10 +3842,8 @@ static const struct tegra_pmc_regs tegra186_pmc_regs = {
static void tegra186_pmc_init(struct tegra_pmc *pmc)
{
- pmc->syscore.suspend = tegra186_pmc_wake_syscore_suspend;
- pmc->syscore.resume = tegra186_pmc_wake_syscore_resume;
-
- register_syscore_ops(&pmc->syscore);
+ pmc->syscore.ops = &tegra186_pmc_wake_syscore_ops;
+ register_syscore(&pmc->syscore);
}
static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -3954,6 +3942,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = false,
.has_usb_sleepwalk = false,
+ .has_single_mmio_aperture = false,
};
static const struct tegra_io_pad_soc tegra194_io_pads[] = {
@@ -4094,6 +4083,7 @@ static const char * const tegra194_reset_sources[] = {
};
static const struct tegra_wake_event tegra194_wake_events[] = {
+ TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA194_MAIN_GPIO(G, 4)),
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
@@ -4139,6 +4129,7 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = false,
.has_usb_sleepwalk = false,
+ .has_single_mmio_aperture = false,
};
static const struct tegra_io_pad_soc tegra234_io_pads[] = {
@@ -4228,10 +4219,19 @@ static const char * const tegra234_reset_sources[] = {
};
static const struct tegra_wake_event tegra234_wake_events[] = {
+ TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)),
+ TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA234_MAIN_GPIO(G, 4)),
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)),
TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
+ TEGRA_WAKE_IRQ("usb3-port-0", 76, 167),
+ TEGRA_WAKE_IRQ("usb3-port-1", 77, 167),
+ TEGRA_WAKE_IRQ("usb3-port-2-3", 78, 167),
+ TEGRA_WAKE_IRQ("usb2-port-0", 79, 167),
+ TEGRA_WAKE_IRQ("usb2-port-1", 80, 167),
+ TEGRA_WAKE_IRQ("usb2-port-2", 81, 167),
+ TEGRA_WAKE_IRQ("usb2-port-3", 82, 167),
TEGRA_WAKE_IRQ("sw-wake", SW_WAKE_ID, 179),
};
@@ -4267,9 +4267,131 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = false,
+ .has_single_mmio_aperture = false,
+};
+
+static const struct tegra_pmc_regs tegra264_pmc_regs = {
+ .scratch0 = 0x684,
+ .rst_status = 0x4,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0x1fc,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
+static const char * const tegra264_reset_sources[] = {
+ "SYS_RESET_N", /* 0x0 */
+ "CSDC_RTC_XTAL",
+ "VREFRO_POWER_BAD",
+ "SCPM_SOC_XTAL",
+ "SCPM_RTC_XTAL",
+ "FMON_32K",
+ "FMON_OSC",
+ "POD_RTC",
+ "POD_IO", /* 0x8 */
+ "POD_PLUS_IO_SPLL",
+ "POD_PLUS_SOC",
+ "VMON_PLUS_UV",
+ "VMON_PLUS_OV",
+ "FUSECRC_FAULT",
+ "OSC_FAULT",
+ "BPMP_BOOT_FAULT",
+ "SCPM_BPMP_CORE_CLK", /* 0x10 */
+ "SCPM_PSC_SE_CLK",
+ "VMON_SOC_MIN",
+ "VMON_SOC_MAX",
+ "VMON_MSS_MIN",
+ "VMON_MSS_MAX",
+ "POD_PLUS_IO_VMON",
+ "NVJTAG_SEL_MONITOR",
+ "NV_THERM_FAULT", /* 0x18 */
+ "FSI_THERM_FAULT",
+ "PSC_SW",
+ "SCPM_OESP_SE_CLK",
+ "SCPM_SB_SE_CLK",
+ "POD_CPU",
+ "POD_GPU",
+ "DCLS_GPU",
+ "POD_MSS", /* 0x20 */
+ "FMON_FSI",
+ "POD_FSI",
+ "VMON_FSI_MIN",
+ "VMON_FSI_MAX",
+ "VMON_CPU0_MIN",
+ "VMON_CPU0_MAX",
+ "BPMP_FMON",
+ "AO_WDT_POR", /* 0x28 */
+ "BPMP_WDT_POR",
+ "AO_TKE_WDT_POR",
+ "RCE0_WDT_POR",
+ "RCE1_WDT_POR",
+ "DCE_WDT_POR",
+ "FSI_R5_WDT_POR",
+ "FSI_R52_0_WDT_POR",
+ "FSI_R52_1_WDT_POR", /* 0x30 */
+ "FSI_R52_2_WDT_POR",
+ "FSI_R52_3_WDT_POR",
+ "TOP_0_WDT_POR",
+ "TOP_1_WDT_POR",
+ "TOP_2_WDT_POR",
+ "APE_C0_WDT_POR",
+ "APE_C1_WDT_POR",
+ "GPU_TKE_WDT_POR", /* 0x38 */
+ "PSC_WDT_POR",
+ "OESP_WDT_POR",
+ "SB_WDT_POR",
+ "SW_MAIN",
+ "L0L1_RST_OUT_N",
+ "FSI_HSM",
+ "CSITE_SW",
+ "AO_WDT_DBG", /* 0x40 */
+ "BPMP_WDT_DBG",
+ "AO_TKE_WDT_DBG",
+ "RCE0_WDT_DBG",
+ "RCE1_WDT_DBG",
+ "DCE_WDT_DBG",
+ "FSI_R5_WDT_DBG",
+ "FSI_R52_0_WDT_DBG",
+ "FSI_R52_1_WDT_DBG", /* 0x48 */
+ "FSI_R52_2_WDT_DBG",
+ "FSI_R52_3_WDT_DBG",
+ "TOP_0_WDT_DBG",
+ "TOP_1_WDT_DBG",
+ "TOP_2_WDT_DBG",
+ "APE_C0_WDT_DBG",
+ "APE_C1_WDT_DBG",
+ "PSC_WDT_DBG", /* 0x50 */
+ "OESP_WDT_DBG",
+ "SB_WDT_DBG",
+ "TSC_0_WDT_DBG",
+ "TSC_1_WDT_DBG",
+ "L2_RST_OUT_N",
+ "SC7"
+};
+
+static const struct tegra_wake_event tegra264_wake_events[] = {
+};
+
+static const struct tegra_pmc_soc tegra264_pmc_soc = {
+ .has_impl_33v_pwr = true,
+ .regs = &tegra264_pmc_regs,
+ .init = tegra186_pmc_init,
+ .setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .set_wake_filters = tegra186_pmc_set_wake_filters,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra264_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra264_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
+ .wake_events = tegra264_wake_events,
+ .num_wake_events = ARRAY_SIZE(tegra264_wake_events),
+ .max_wake_events = 128,
+ .max_wake_vectors = 4,
};
static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra264-pmc", .data = &tegra264_pmc_soc },
{ .compatible = "nvidia,tegra234-pmc", .data = &tegra234_pmc_soc },
{ .compatible = "nvidia,tegra194-pmc", .data = &tegra194_pmc_soc },
{ .compatible = "nvidia,tegra186-pmc", .data = &tegra186_pmc_soc },
@@ -4284,8 +4406,25 @@ static const struct of_device_id tegra_pmc_match[] = {
static void tegra_pmc_sync_state(struct device *dev)
{
+ struct device_node *np, *child;
int err;
+ np = of_get_child_by_name(dev->of_node, "powergates");
+ if (!np)
+ return;
+
+ for_each_child_of_node(np, child)
+ of_genpd_sync_state(child);
+
+ of_node_put(np);
+
+ np = of_get_child_by_name(dev->of_node, "core-domain");
+ if (!np)
+ return;
+
+ of_genpd_sync_state(np);
+ of_node_put(np);
+
/*
* Newer device-trees have power domains, but we need to prepare all
* device drivers with runtime PM and OPP support first, otherwise
@@ -4299,9 +4438,6 @@ static void tegra_pmc_sync_state(struct device *dev)
* no dependencies that will block the state syncing. We shouldn't
* mark the domain as synced in this case.
*/
- if (!pmc->core_domain_registered)
- return;
-
pmc->core_domain_state_synced = true;
/* this is a no-op if core regulator isn't used */
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index 2cae17b65fd9..1a93001c9e36 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -50,18 +50,6 @@ config WKUP_M3_IPC
to communicate and use the Wakeup M3 for PM features like suspend
resume and boots it using wkup_m3_rproc driver.
-config TI_SCI_PM_DOMAINS
- tristate "TI SCI PM Domains Driver"
- depends on TI_SCI_PROTOCOL
- depends on PM_GENERIC_DOMAINS
- help
- Generic power domain implementation for TI device implementing
- the TI SCI protocol.
-
- To compile this as a module, choose M here. The module will be
- called ti_sci_pm_domains. Note this is needed early in boot before
- rootfs may be available.
-
config TI_K3_RINGACC
tristate "K3 Ring accelerator Sub System"
depends on ARCH_K3 || COMPILE_TEST
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 148f54d9691d..7602b8a909b0 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -161,7 +161,7 @@ struct k3_ring {
struct k3_ringacc_proxy_target_regs __iomem *proxy;
dma_addr_t ring_mem_dma;
void *ring_mem_virt;
- struct k3_ring_ops *ops;
+ const struct k3_ring_ops *ops;
u32 size;
enum k3_ring_size elm_size;
enum k3_ring_mode mode;
@@ -268,17 +268,17 @@ static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
-static struct k3_ring_ops k3_ring_mode_ring_ops = {
+static const struct k3_ring_ops k3_ring_mode_ring_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_ringacc_ring_pop_mem,
};
-static struct k3_ring_ops k3_dmaring_fwd_ops = {
+static const struct k3_ring_ops k3_dmaring_fwd_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_dmaring_fwd_pop,
};
-static struct k3_ring_ops k3_dmaring_reverse_ops = {
+static const struct k3_ring_ops k3_dmaring_reverse_ops = {
/* Reverse side of the DMA ring can only be popped by SW */
.pop_head = k3_dmaring_reverse_pop,
};
@@ -288,7 +288,7 @@ static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
-static struct k3_ring_ops k3_ring_mode_msg_ops = {
+static const struct k3_ring_ops k3_ring_mode_msg_ops = {
.push_tail = k3_ringacc_ring_push_io,
.push_head = k3_ringacc_ring_push_head_io,
.pop_tail = k3_ringacc_ring_pop_tail_io,
@@ -300,7 +300,7 @@ static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
-static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+static const struct k3_ring_ops k3_ring_mode_proxy_ops = {
.push_tail = k3_ringacc_ring_push_tail_proxy,
.push_head = k3_ringacc_ring_push_head_proxy,
.pop_tail = k3_ringacc_ring_pop_tail_proxy,
@@ -1291,7 +1291,7 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
mutex_lock(&k3_ringacc_list_lock);
list_for_each_entry(entry, &k3_ringacc_list, list)
- if (entry->dev->of_node == ringacc_np) {
+ if (device_match_of_node(entry->dev, ringacc_np)) {
ringacc = entry;
break;
}
@@ -1551,14 +1551,13 @@ static int k3_ringacc_probe(struct platform_device *pdev)
return 0;
}
-static int k3_ringacc_remove(struct platform_device *pdev)
+static void k3_ringacc_remove(struct platform_device *pdev)
{
struct k3_ringacc *ringacc = dev_get_drvdata(&pdev->dev);
mutex_lock(&k3_ringacc_list_lock);
list_del(&ringacc->list);
mutex_unlock(&k3_ringacc_list_lock);
- return 0;
}
static struct platform_driver k3_ringacc_driver = {
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 6ea9b8c7d335..50c170a995f9 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -20,7 +20,7 @@
* 31-28 VARIANT Device variant
* 27-12 PARTNO Part number
* 11-1 MFG Indicates TI as manufacturer (0x17)
- * 1 Always 1
+ * 0 Always 1
*/
#define CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT (28)
#define CTRLMMR_WKUP_JTAGID_VARIANT_MASK GENMASK(31, 28)
@@ -33,19 +33,41 @@
#define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17
+#define JTAG_ID_PARTNO_AM65X 0xBB5A
+#define JTAG_ID_PARTNO_J721E 0xBB64
+#define JTAG_ID_PARTNO_J7200 0xBB6D
+#define JTAG_ID_PARTNO_AM64X 0xBB38
+#define JTAG_ID_PARTNO_J721S2 0xBB75
+#define JTAG_ID_PARTNO_AM62X 0xBB7E
+#define JTAG_ID_PARTNO_J784S4 0xBB80
+#define JTAG_ID_PARTNO_AM62AX 0xBB8D
+#define JTAG_ID_PARTNO_AM62PX 0xBB9D
+#define JTAG_ID_PARTNO_J722S 0xBBA0
+#define JTAG_ID_PARTNO_AM62LX 0xBBA7
+
static const struct k3_soc_id {
unsigned int id;
const char *family_name;
} k3_soc_ids[] = {
- { 0xBB5A, "AM65X" },
- { 0xBB64, "J721E" },
- { 0xBB6D, "J7200" },
- { 0xBB38, "AM64X" },
- { 0xBB75, "J721S2"},
- { 0xBB7E, "AM62X" },
- { 0xBB80, "J784S4" },
- { 0xBB8D, "AM62AX" },
- { 0xBB9D, "AM62PX" },
+ { JTAG_ID_PARTNO_AM65X, "AM65X" },
+ { JTAG_ID_PARTNO_J721E, "J721E" },
+ { JTAG_ID_PARTNO_J7200, "J7200" },
+ { JTAG_ID_PARTNO_AM64X, "AM64X" },
+ { JTAG_ID_PARTNO_J721S2, "J721S2"},
+ { JTAG_ID_PARTNO_AM62X, "AM62X" },
+ { JTAG_ID_PARTNO_J784S4, "J784S4" },
+ { JTAG_ID_PARTNO_AM62AX, "AM62AX" },
+ { JTAG_ID_PARTNO_AM62PX, "AM62PX" },
+ { JTAG_ID_PARTNO_J722S, "J722S" },
+ { JTAG_ID_PARTNO_AM62LX, "AM62LX" },
+};
+
+static const char * const j721e_rev_string_map[] = {
+ "1.0", "1.1", "2.0",
+};
+
+static const char * const am62lx_rev_string_map[] = {
+ "1.0", "1.1",
};
static int
@@ -60,9 +82,47 @@ k3_chipinfo_partno_to_names(unsigned int partno,
return 0;
}
- return -EINVAL;
+ return -ENODEV;
}
+static int
+k3_chipinfo_variant_to_sr(unsigned int partno, unsigned int variant,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ switch (partno) {
+ case JTAG_ID_PARTNO_J721E:
+ if (variant >= ARRAY_SIZE(j721e_rev_string_map))
+ goto err_unknown_variant;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s",
+ j721e_rev_string_map[variant]);
+ break;
+ case JTAG_ID_PARTNO_AM62LX:
+ if (variant >= ARRAY_SIZE(am62lx_rev_string_map))
+ goto err_unknown_variant;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s",
+ am62lx_rev_string_map[variant]);
+ break;
+ default:
+ variant++;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0",
+ variant);
+ }
+
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ return 0;
+
+err_unknown_variant:
+ return -ENODEV;
+}
+
+static const struct regmap_config k3_chipinfo_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int k3_chipinfo_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -70,13 +130,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct regmap *regmap;
+ void __iomem *base;
u32 partno_id;
u32 variant;
u32 jtag_id;
u32 mfg;
int ret;
- regmap = device_node_to_regmap(node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -94,7 +159,6 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >>
CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT;
- variant++;
partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >>
CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT;
@@ -103,17 +167,16 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
if (!soc_dev_attr)
return -ENOMEM;
- soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant);
- if (!soc_dev_attr->revision) {
- ret = -ENOMEM;
+ ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ if (ret) {
+ dev_err(dev, "Unknown SoC JTAGID[0x%08X]: %d\n", jtag_id, ret);
goto err;
}
- ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ ret = k3_chipinfo_variant_to_sr(partno_id, variant, soc_dev_attr);
if (ret) {
- dev_err(dev, "Unknown SoC JTAGID[0x%08X]\n", jtag_id);
- ret = -ENODEV;
- goto err_free_rev;
+ dev_err(dev, "Unknown SoC SR[0x%08X]: %d\n", jtag_id, ret);
+ goto err;
}
node = of_find_node_by_path("/");
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 0fbc37cd5123..553ae7ee20f1 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -402,7 +402,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
* @name: slave channel name
* @config: dma configuration parameters
*
- * Returns pointer to appropriate DMA channel on success or error.
+ * Return: Pointer to appropriate DMA channel on success or NULL on error.
*/
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
@@ -414,13 +414,13 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!kdev) {
pr_err("keystone-navigator-dma driver not registered\n");
- return (void *)-EINVAL;
+ return NULL;
}
chan_num = of_channel_match_helper(dev->of_node, name, &instance);
if (chan_num < 0) {
dev_err(kdev->dev, "No DMA instance with name %s\n", name);
- return (void *)-EINVAL;
+ return NULL;
}
dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
@@ -431,7 +431,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (config->direction != DMA_MEM_TO_DEV &&
config->direction != DMA_DEV_TO_MEM) {
dev_err(kdev->dev, "bad direction\n");
- return (void *)-EINVAL;
+ return NULL;
}
/* Look for correct dma instance */
@@ -443,7 +443,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
}
if (!dma) {
dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
- return (void *)-EINVAL;
+ return NULL;
}
/* Look for correct dma channel from dma instance */
@@ -463,14 +463,14 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!chan) {
dev_err(kdev->dev, "channel %d is not in DMA %s\n",
chan_num, instance);
- return (void *)-EINVAL;
+ return NULL;
}
if (atomic_read(&chan->ref_count) >= 1) {
if (!check_config(chan, config)) {
dev_err(kdev->dev, "channel %d config miss-match\n",
chan_num);
- return (void *)-EINVAL;
+ return NULL;
}
}
@@ -602,7 +602,7 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node)
unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
struct device_node *node = dma_node;
struct knav_dma_device *dma;
- int ret, len, num_chan = 0;
+ int ret, num_chan = 0;
resource_size_t size;
u32 timeout;
u32 i;
@@ -615,25 +615,13 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node)
INIT_LIST_HEAD(&dma->list);
INIT_LIST_HEAD(&dma->chan_list);
- if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
- dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
- return -ENODEV;
- }
-
- dma->logical_queue_managers = len / sizeof(u32);
- if (dma->logical_queue_managers > DMA_MAX_QMS) {
- dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
- dma->logical_queue_managers);
- dma->logical_queue_managers = DMA_MAX_QMS;
- }
-
- ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
- dma->qm_base_address,
- dma->logical_queue_managers);
- if (ret) {
+ ret = of_property_read_variable_u32_array(cloud, "ti,navigator-cloud-address",
+ dma->qm_base_address, 1, DMA_MAX_QMS);
+ if (ret < 0) {
dev_err(kdev->dev, "invalid navigator cloud addresses\n");
return -ENODEV;
}
+ dma->logical_queue_managers = ret;
dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
if (IS_ERR(dma->reg_global))
@@ -773,7 +761,7 @@ err_pm_disable:
return ret;
}
-static int knav_dma_remove(struct platform_device *pdev)
+static void knav_dma_remove(struct platform_device *pdev)
{
struct knav_dma_device *dma;
@@ -784,8 +772,6 @@ static int knav_dma_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static struct of_device_id of_match[] = {
@@ -798,7 +784,7 @@ MODULE_DEVICE_TABLE(of, of_match);
static struct platform_driver knav_dma_driver = {
.probe = knav_dma_probe,
.remove = knav_dma_remove,
- .driver = {
+ .driver = {
.name = "keystone-navigator-dma",
.of_match_table = of_match,
},
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
index a01eda720bf6..9325e8ce2e25 100644
--- a/drivers/soc/ti/knav_qmss.h
+++ b/drivers/soc/ti/knav_qmss.h
@@ -333,7 +333,7 @@ struct knav_range_info {
void *queue_base_inst;
unsigned flags;
struct list_head list;
- struct knav_range_ops *ops;
+ const struct knav_range_ops *ops;
struct knav_acc_info acc_info;
struct knav_acc_channel *acc;
unsigned num_irqs;
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
index 3d388646ed43..269b4e75ae40 100644
--- a/drivers/soc/ti/knav_qmss_acc.c
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -450,7 +450,7 @@ static int knav_acc_free_range(struct knav_range_info *range)
return 0;
}
-static struct knav_range_ops knav_acc_range_ops = {
+static const struct knav_range_ops knav_acc_range_ops = {
.set_notify = knav_acc_set_notify,
.init_queue = knav_acc_init_queue,
.open_queue = knav_acc_open_queue,
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 0f252c2549ba..6e56e7609ccd 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -14,10 +14,12 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>
@@ -117,11 +119,10 @@ static int knav_queue_setup_irq(struct knav_range_info *range,
if (range->flags & RANGE_HAS_IRQ) {
irq = range->irqs[queue].irq;
- ret = request_irq(irq, knav_queue_int_handler, 0,
- inst->irq_name, inst);
+ ret = request_irq(irq, knav_queue_int_handler, IRQF_NO_AUTOEN,
+ inst->irq_name, inst);
if (ret)
return ret;
- disable_irq(irq);
if (range->irqs[queue].cpu_mask) {
ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
if (ret) {
@@ -251,8 +252,7 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
return qh;
err:
- if (qh->stats)
- free_percpu(qh->stats);
+ free_percpu(qh->stats);
devm_kfree(inst->kdev->dev, qh);
return ERR_PTR(ret);
}
@@ -409,7 +409,7 @@ static int knav_gp_close_queue(struct knav_range_info *range,
return 0;
}
-static struct knav_range_ops knav_gp_range_ops = {
+static const struct knav_range_ops knav_gp_range_ops = {
.set_notify = knav_gp_set_notify,
.open_queue = knav_gp_open_queue,
.close_queue = knav_gp_close_queue,
@@ -721,7 +721,6 @@ static void kdesc_empty_pool(struct knav_pool *pool)
if (!desc) {
dev_dbg(pool->kdev->dev,
"couldn't unmap desc, continuing\n");
- continue;
}
}
WARN_ON(i != pool->num_desc);
@@ -1074,14 +1073,20 @@ static const char *knav_queue_find_name(struct device_node *node)
}
static int knav_queue_setup_regions(struct knav_device *kdev,
- struct device_node *regions)
+ struct device_node *node)
{
struct device *dev = kdev->dev;
+ struct device_node *regions __free(device_node) =
+ of_get_child_by_name(node, "descriptor-regions");
struct knav_region *region;
struct device_node *child;
u32 temp[2];
int ret;
+ if (!regions)
+ return dev_err_probe(dev, -ENODEV,
+ "descriptor-regions not specified\n");
+
for_each_child_of_node(regions, child) {
region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region) {
@@ -1102,11 +1107,6 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
continue;
}
- if (!of_get_property(child, "link-index", NULL)) {
- dev_err(dev, "No link info for %s\n", region->name);
- devm_kfree(dev, region);
- continue;
- }
ret = of_property_read_u32(child, "link-index",
&region->link_index);
if (ret) {
@@ -1119,10 +1119,9 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
INIT_LIST_HEAD(&region->pools);
list_add_tail(&region->list, &kdev->regions);
}
- if (list_empty(&kdev->regions)) {
- dev_err(dev, "no valid region information found\n");
- return -ENODEV;
- }
+ if (list_empty(&kdev->regions))
+ return dev_err_probe(dev, -ENODEV,
+ "no valid region information found\n");
/* Next, we run through the regions and set things up */
for_each_region(kdev, region)
@@ -1304,10 +1303,16 @@ static int knav_setup_queue_range(struct knav_device *kdev,
}
static int knav_setup_queue_pools(struct knav_device *kdev,
- struct device_node *queue_pools)
+ struct device_node *node)
{
+ struct device_node *queue_pools __free(device_node) =
+ of_get_child_by_name(node, "queue-pools");
struct device_node *type, *range;
+ if (!queue_pools)
+ return dev_err_probe(kdev->dev, -ENODEV,
+ "queue-pools not specified\n");
+
for_each_child_of_node(queue_pools, type) {
for_each_child_of_node(type, range) {
/* return value ignored, we init the rest... */
@@ -1316,10 +1321,9 @@ static int knav_setup_queue_pools(struct knav_device *kdev,
}
/* ... and barf if they all failed! */
- if (list_empty(&kdev->queue_ranges)) {
- dev_err(kdev->dev, "no valid queue range found\n");
- return -ENODEV;
- }
+ if (list_empty(&kdev->queue_ranges))
+ return dev_err_probe(kdev->dev, -ENODEV,
+ "no valid queue range found\n");
return 0;
}
@@ -1387,14 +1391,20 @@ static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
}
static int knav_queue_init_qmgrs(struct knav_device *kdev,
- struct device_node *qmgrs)
+ struct device_node *node)
{
struct device *dev = kdev->dev;
+ struct device_node *qmgrs __free(device_node) =
+ of_get_child_by_name(node, "qmgrs");
struct knav_qmgr_info *qmgr;
struct device_node *child;
u32 temp[2];
int ret;
+ if (!qmgrs)
+ return dev_err_probe(dev, -ENODEV,
+ "queue manager info not specified\n");
+
for_each_child_of_node(qmgrs, child) {
qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
if (!qmgr) {
@@ -1666,6 +1676,26 @@ static int knav_queue_start_pdsps(struct knav_device *kdev)
return 0;
}
+static int knav_queue_setup_pdsps(struct knav_device *kdev,
+ struct device_node *node)
+{
+ struct device_node *pdsps __free(device_node) =
+ of_get_child_by_name(node, "pdsps");
+
+ if (pdsps) {
+ int ret;
+
+ ret = knav_queue_init_pdsps(kdev, pdsps);
+ if (ret)
+ return ret;
+
+ ret = knav_queue_start_pdsps(kdev);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
struct knav_qmgr_info *qmgr;
@@ -1753,8 +1783,6 @@ MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
static int knav_queue_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
u32 temp[2];
int ret;
@@ -1770,8 +1798,7 @@ static int knav_queue_probe(struct platform_device *pdev)
return -ENOMEM;
}
- match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
- if (match && match->data)
+ if (device_get_match_data(dev))
kdev->version = QMSS_66AK2G;
platform_set_drvdata(pdev, kdev);
@@ -1799,39 +1826,17 @@ static int knav_queue_probe(struct platform_device *pdev)
kdev->num_queues = temp[1];
/* Initialize queue managers using device tree configuration */
- qmgrs = of_get_child_by_name(node, "qmgrs");
- if (!qmgrs) {
- dev_err(dev, "queue manager info not specified\n");
- ret = -ENODEV;
- goto err;
- }
- ret = knav_queue_init_qmgrs(kdev, qmgrs);
- of_node_put(qmgrs);
+ ret = knav_queue_init_qmgrs(kdev, node);
if (ret)
goto err;
/* get pdsp configuration values from device tree */
- pdsps = of_get_child_by_name(node, "pdsps");
- if (pdsps) {
- ret = knav_queue_init_pdsps(kdev, pdsps);
- if (ret)
- goto err;
-
- ret = knav_queue_start_pdsps(kdev);
- if (ret)
- goto err;
- }
- of_node_put(pdsps);
+ ret = knav_queue_setup_pdsps(kdev, node);
+ if (ret)
+ goto err;
/* get usable queue range values from device tree */
- queue_pools = of_get_child_by_name(node, "queue-pools");
- if (!queue_pools) {
- dev_err(dev, "queue-pools not specified\n");
- ret = -ENODEV;
- goto err;
- }
- ret = knav_setup_queue_pools(kdev, queue_pools);
- of_node_put(queue_pools);
+ ret = knav_setup_queue_pools(kdev, node);
if (ret)
goto err;
@@ -1853,14 +1858,7 @@ static int knav_queue_probe(struct platform_device *pdev)
if (ret)
goto err;
- regions = of_get_child_by_name(node, "descriptor-regions");
- if (!regions) {
- dev_err(dev, "descriptor-regions not specified\n");
- ret = -ENODEV;
- goto err;
- }
- ret = knav_queue_setup_regions(kdev, regions);
- of_node_put(regions);
+ ret = knav_queue_setup_regions(kdev, node);
if (ret)
goto err;
@@ -1884,12 +1882,11 @@ err:
return ret;
}
-static int knav_queue_remove(struct platform_device *pdev)
+static void knav_queue_remove(struct platform_device *pdev)
{
/* TODO: Free resources */
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static struct platform_driver keystone_qmss_driver = {
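The IRQF_NO_AUTOEN conversion above replaces the request_irq() + disable_irq() pair, which left a short window where the interrupt could fire before it was masked. A minimal sketch of the pattern, using hypothetical names rather than the knav driver's own structures:

#include <linux/interrupt.h>

static irqreturn_t demo_queue_irq(int irq, void *data)
{
        /* notify whoever opened the queue */
        return IRQ_HANDLED;
}

static int demo_setup_queue_irq(int irq, void *data)
{
        int ret;

        /* IRQF_NO_AUTOEN keeps the line masked until enable_irq() */
        ret = request_irq(irq, demo_queue_irq, IRQF_NO_AUTOEN,
                          "demo-queue", data);
        if (ret)
                return ret;

        /* later, once a notifier is actually attached: */
        enable_irq(irq);
        return 0;
}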
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index f04c21157904..dc52a2197d24 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -145,7 +145,7 @@ static int am33xx_do_sram_idle(u32 wfi_flags)
return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}
-static int __init am43xx_map_gic(void)
+static int am43xx_map_gic(void)
{
gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
@@ -383,54 +383,44 @@ static void am33xx_pm_free_sram(void)
*/
static int am33xx_pm_alloc_sram(void)
{
- struct device_node *np;
- int ret = 0;
+ struct device_node *np __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
- np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
if (!np) {
np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
- if (!np) {
- dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
- __func__);
- return -ENODEV;
- }
+ if (!np)
+ return dev_err_probe(pm33xx_dev, -ENODEV,
+ "PM: %s: Unable to find device node for mpu\n",
+ __func__);
}
sram_pool = of_gen_pool_get(np, "pm-sram", 0);
- if (!sram_pool) {
- dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
- __func__);
- ret = -ENODEV;
- goto mpu_put_node;
- }
+ if (!sram_pool)
+ return dev_err_probe(pm33xx_dev, -ENODEV,
+ "PM: %s: Unable to get sram pool for ocmcram\n",
+ __func__);
sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
- if (!sram_pool_data) {
- dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
- __func__);
- ret = -ENODEV;
- goto mpu_put_node;
- }
+ if (!sram_pool_data)
+ return dev_err_probe(pm33xx_dev, -ENODEV,
+ "PM: %s: Unable to get sram data pool for ocmcram\n",
+ __func__);
ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
- if (!ocmcram_location) {
- dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
- __func__);
- ret = -ENOMEM;
- goto mpu_put_node;
- }
+ if (!ocmcram_location)
+ return dev_err_probe(pm33xx_dev, -ENOMEM,
+ "PM: %s: Unable to allocate memory from ocmcram\n",
+ __func__);
ocmcram_location_data = gen_pool_alloc(sram_pool_data,
sizeof(struct emif_regs_amx3));
if (!ocmcram_location_data) {
- dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
- ret = -ENOMEM;
+ return dev_err_probe(pm33xx_dev, -ENOMEM,
+ "PM: Unable to allocate memory from ocmcram\n");
}
-mpu_put_node:
- of_node_put(np);
- return ret;
+ return 0;
}
static int am33xx_pm_rtc_setup(void)
@@ -450,14 +440,14 @@ static int am33xx_pm_rtc_setup(void)
rtc_base_virt = of_iomap(np, 0);
if (!rtc_base_virt) {
- pr_warn("PM: could not iomap rtc");
+ pr_warn("PM: could not iomap rtc\n");
error = -ENODEV;
goto err_clk_put;
}
omap_rtc = rtc_class_open("rtc0");
if (!omap_rtc) {
- pr_warn("PM: rtc0 not available");
+ pr_warn("PM: rtc0 not available\n");
error = -EPROBE_DEFER;
goto err_iounmap;
}
@@ -583,7 +573,7 @@ err_wkup_m3_ipc_put:
return ret;
}
-static int am33xx_pm_remove(struct platform_device *pdev)
+static void am33xx_pm_remove(struct platform_device *pdev)
{
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -594,7 +584,6 @@ static int am33xx_pm_remove(struct platform_device *pdev)
am33xx_pm_free_sram();
iounmap(rtc_base_virt);
clk_put(rtc_fck);
- return 0;
}
static struct platform_driver am33xx_pm_driver = {
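The am33xx_pm_alloc_sram() rework above leans on two helpers used throughout this series: __free(device_node), which drops the OF node reference on every return path, and dev_err_probe(), which logs and returns the error in one expression. A rough sketch under a made-up "vendor,example-mpu" compatible:

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/of.h>

static int demo_find_sram_node(struct device *dev)
{
        /* of_node_put() runs automatically when np goes out of scope */
        struct device_node *np __free(device_node) =
                of_find_compatible_node(NULL, NULL, "vendor,example-mpu");

        if (!np)
                return dev_err_probe(dev, -ENODEV, "no mpu node found\n");

        /* use np here; no goto/of_node_put() ladder is needed */
        return 0;
}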
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index f49f8492dde5..038576805bfa 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -380,39 +380,81 @@ put_clk_mux_np:
static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
{
- const struct pruss_private_data *data;
- struct device_node *clks_np;
struct device *dev = pruss->dev;
- int ret = 0;
-
- data = of_device_get_match_data(dev);
+ struct device_node *clks_np __free(device_node) =
+ of_get_child_by_name(cfg_node, "clocks");
+ const struct pruss_private_data *data = of_device_get_match_data(dev);
+ int ret;
- clks_np = of_get_child_by_name(cfg_node, "clocks");
- if (!clks_np) {
- dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
- return -ENODEV;
- }
+ if (!clks_np)
+ return dev_err_probe(dev, -ENODEV,
+ "%pOF is missing its 'clocks' node\n",
+ cfg_node);
if (data && data->has_core_mux_clock) {
ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux,
"coreclk-mux", clks_np);
- if (ret) {
- dev_err(dev, "failed to setup coreclk-mux\n");
- goto put_clks_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to setup coreclk-mux\n");
}
ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux",
clks_np);
- if (ret) {
- dev_err(dev, "failed to setup iepclk-mux\n");
- goto put_clks_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to setup iepclk-mux\n");
-put_clks_node:
- of_node_put(clks_np);
+ return 0;
+}
- return ret;
+static int pruss_of_setup_memories(struct device *dev, struct pruss *pruss)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child __free(device_node) =
+ of_get_child_by_name(np, "memories");
+ const struct pruss_private_data *data = of_device_get_match_data(dev);
+ const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
+ int i;
+
+ if (!child)
+ return dev_err_probe(dev, -ENODEV,
+ "%pOF is missing its 'memories' node\n",
+ child);
+
+ for (i = 0; i < PRUSS_MEM_MAX; i++) {
+ struct resource res;
+ int index;
+
+ /*
+ * On AM437x one of two PRUSS units don't contain Shared RAM,
+ * skip it
+ */
+ if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
+ continue;
+
+ index = of_property_match_string(child, "reg-names",
+ mem_names[i]);
+ if (index < 0)
+ return index;
+
+ if (of_address_to_resource(child, index, &res))
+ return -EINVAL;
+
+ pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!pruss->mem_regions[i].va)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ pruss->mem_regions[i].pa = res.start;
+ pruss->mem_regions[i].size = resource_size(&res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %p\n",
+ mem_names[i], &pruss->mem_regions[i].pa,
+ pruss->mem_regions[i].size, pruss->mem_regions[i].va);
+ }
+
+ return 0;
}
static struct regmap_config regmap_conf = {
@@ -424,26 +466,21 @@ static struct regmap_config regmap_conf = {
static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
{
struct device_node *np = dev_of_node(dev);
- struct device_node *child;
+ struct device_node *child __free(device_node) =
+ of_get_child_by_name(np, "cfg");
struct resource res;
int ret;
- child = of_get_child_by_name(np, "cfg");
- if (!child) {
- dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
- return -ENODEV;
- }
+ if (!child)
+ return dev_err_probe(dev, -ENODEV,
+ "%pOF is missing its 'cfg' node\n", child);
- if (of_address_to_resource(child, 0, &res)) {
- ret = -ENOMEM;
- goto node_put;
- }
+ if (of_address_to_resource(child, 0, &res))
+ return -ENOMEM;
pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
- if (!pruss->cfg_base) {
- ret = -ENOMEM;
- goto node_put;
- }
+ if (!pruss->cfg_base)
+ return -ENOMEM;
regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
(u64)res.start);
@@ -452,34 +489,22 @@ static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
&regmap_conf);
kfree(regmap_conf.name);
- if (IS_ERR(pruss->cfg_regmap)) {
- dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
- PTR_ERR(pruss->cfg_regmap));
- ret = PTR_ERR(pruss->cfg_regmap);
- goto node_put;
- }
+ if (IS_ERR(pruss->cfg_regmap))
+ return dev_err_probe(dev, PTR_ERR(pruss->cfg_regmap),
+ "regmap_init_mmio failed for cfg\n");
ret = pruss_clk_init(pruss, child);
if (ret)
- dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret);
+ return dev_err_probe(dev, ret, "pruss_clk_init failed\n");
-node_put:
- of_node_put(child);
- return ret;
+ return 0;
}
static int pruss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev_of_node(dev);
- struct device_node *child;
struct pruss *pruss;
- struct resource res;
- int ret, i, index;
- const struct pruss_private_data *data;
- const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
-
- data = of_device_get_match_data(&pdev->dev);
+ int ret;
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -494,48 +519,9 @@ static int pruss_probe(struct platform_device *pdev)
pruss->dev = dev;
mutex_init(&pruss->lock);
- child = of_get_child_by_name(np, "memories");
- if (!child) {
- dev_err(dev, "%pOF is missing its 'memories' node\n", child);
- return -ENODEV;
- }
-
- for (i = 0; i < PRUSS_MEM_MAX; i++) {
- /*
- * On AM437x one of two PRUSS units don't contain Shared RAM,
- * skip it
- */
- if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
- continue;
-
- index = of_property_match_string(child, "reg-names",
- mem_names[i]);
- if (index < 0) {
- of_node_put(child);
- return index;
- }
-
- if (of_address_to_resource(child, index, &res)) {
- of_node_put(child);
- return -EINVAL;
- }
-
- pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
- resource_size(&res));
- if (!pruss->mem_regions[i].va) {
- dev_err(dev, "failed to parse and map memory resource %d %s\n",
- i, mem_names[i]);
- of_node_put(child);
- return -ENOMEM;
- }
- pruss->mem_regions[i].pa = res.start;
- pruss->mem_regions[i].size = resource_size(&res);
-
- dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
- mem_names[i], &pruss->mem_regions[i].pa,
- pruss->mem_regions[i].size, pruss->mem_regions[i].va);
- }
- of_node_put(child);
+ ret = pruss_of_setup_memories(dev, pruss);
+ if (ret < 0)
+ return ret;
platform_set_drvdata(pdev, pruss);
@@ -565,7 +551,7 @@ rpm_disable:
return ret;
}
-static int pruss_remove(struct platform_device *pdev)
+static void pruss_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -573,8 +559,6 @@ static int pruss_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-
- return 0;
}
/* instance-specific driver private data */
@@ -609,7 +593,7 @@ static struct platform_driver pruss_driver = {
.name = "pruss",
.of_match_table = pruss_of_match,
},
- .probe = pruss_probe,
+ .probe = pruss_probe,
.remove = pruss_remove,
};
module_platform_driver(pruss_driver);
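The new pruss_of_setup_memories() helper folds the probe-time loop above into one place. A stripped-down sketch of mapping one named region from a child node; "memories" and "dram0" come from the hunk, everything else is illustrative:

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *demo_map_named_region(struct device *dev,
                                           struct device_node *np)
{
        struct device_node *child __free(device_node) =
                of_get_child_by_name(np, "memories");
        struct resource res;
        int index;

        if (!child)
                return IOMEM_ERR_PTR(-ENODEV);

        index = of_property_match_string(child, "reg-names", "dram0");
        if (index < 0)
                return IOMEM_ERR_PTR(index);

        if (of_address_to_resource(child, index, &res))
                return IOMEM_ERR_PTR(-EINVAL);

        return devm_ioremap(dev, res.start, resource_size(&res));
}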
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
index 62b2f1464e46..ced3a73929e3 100644
--- a/drivers/soc/ti/smartreflex.c
+++ b/drivers/soc/ti/smartreflex.c
@@ -202,10 +202,10 @@ static int sr_late_init(struct omap_sr *sr_info)
if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq,
- sr_interrupt, 0, sr_info->name, sr_info);
+ sr_interrupt, IRQF_NO_AUTOEN,
+ sr_info->name, sr_info);
if (ret)
goto error;
- disable_irq(sr_info->irq);
}
return ret;
@@ -933,7 +933,7 @@ err_list_del:
return ret;
}
-static int omap_sr_remove(struct platform_device *pdev)
+static void omap_sr_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_sr *sr_info = platform_get_drvdata(pdev);
@@ -945,7 +945,6 @@ static int omap_sr_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
clk_unprepare(sr_info->fck);
list_del(&sr_info->node);
- return 0;
}
static void omap_sr_shutdown(struct platform_device *pdev)
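omap_sr_remove() is one of several conversions in this series to the void-returning platform .remove() callback; the returned int could never stop an unbind, so dropping it removes dead error paths. A minimal, hypothetical driver skeleton using the new signature:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
        pm_runtime_enable(&pdev->dev);
        return 0;
}

static void demo_remove(struct platform_device *pdev)
{
        /* no return value: cleanup must not fail */
        pm_runtime_disable(&pdev->dev);
}

static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .remove = demo_remove,
        .driver = {
                .name = "demo",
        },
};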
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index c36364522157..193266f5e3f9 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -103,19 +103,15 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (ret)
return ret;
- msi_lock_descs(dev);
+ guard(msi_descs_lock)(dev);
nvec = ti_sci_inta_msi_alloc_descs(dev, res);
- if (nvec <= 0) {
- ret = nvec;
- goto unlock;
- }
+ if (nvec <= 0)
+ return nvec;
/* Use alloc ALL as it's unclear whether there are gaps in the indices */
ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
if (ret)
dev_err(dev, "Failed to allocate IRQs %d\n", ret);
-unlock:
- msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
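guard(msi_descs_lock)(dev) above is the scope-based locking helper from <linux/cleanup.h>: the lock is released automatically on every return path, so the unlock label disappears. The same idiom shown with an ordinary mutex, as a self-contained sketch:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_count;

static int demo_increment(int max)
{
        guard(mutex)(&demo_lock);       /* unlocked on any return below */

        if (demo_count >= max)
                return -ENOSPC;

        demo_count++;
        return 0;
}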
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 3aff106fc11a..5845fc652adc 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -16,7 +16,6 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/suspend.h>
@@ -314,7 +313,6 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
{
struct device *dev = m3_ipc->dev;
- mbox_msg_t dummy_msg = 0;
int ret;
if (!m3_ipc->mbox) {
@@ -330,7 +328,7 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
* the RX callback to avoid multiple interrupts being received
* by the CM3.
*/
- ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ ret = mbox_send_message(m3_ipc->mbox, NULL);
if (ret < 0) {
dev_err(dev, "%s: mbox_send_message() failed: %d\n",
__func__, ret);
@@ -352,7 +350,6 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
{
struct device *dev = m3_ipc->dev;
- mbox_msg_t dummy_msg = 0;
int ret;
if (!m3_ipc->mbox) {
@@ -361,7 +358,7 @@ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
return -EIO;
}
- ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ ret = mbox_send_message(m3_ipc->mbox, NULL);
if (ret < 0) {
dev_err(dev, "%s: mbox_send_message() failed: %d\n",
__func__, ret);
@@ -647,11 +644,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
- if (IS_ERR(m3_ipc->mbox)) {
- dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
- PTR_ERR(m3_ipc->mbox));
- return PTR_ERR(m3_ipc->mbox);
- }
+ if (IS_ERR(m3_ipc->mbox))
+ return dev_err_probe(dev, PTR_ERR(m3_ipc->mbox),
+ "IPC Request for A8->M3 Channel failed!\n");
if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
dev_err(&pdev->dev, "could not get rproc phandle\n");
@@ -713,7 +708,7 @@ err_free_mbox:
return ret;
}
-static int wkup_m3_ipc_remove(struct platform_device *pdev)
+static void wkup_m3_ipc_remove(struct platform_device *pdev)
{
wkup_m3_ipc_dbg_destroy(m3_ipc_state);
@@ -723,8 +718,6 @@ static int wkup_m3_ipc_remove(struct platform_device *pdev)
rproc_put(m3_ipc_state->rproc);
m3_ipc_state = NULL;
-
- return 0;
}
static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
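The wkup_m3 pings were already sending a dummy value, so passing NULL to mbox_send_message() keeps the doorbell behaviour without the on-stack mbox_msg_t or the omap-mailbox.h include. A hedged sketch of such a ping for a client that manages TX completion itself; channel setup is not shown:

#include <linux/device.h>
#include <linux/mailbox_client.h>

static int demo_ping(struct device *dev, struct mbox_chan *chan)
{
        int ret;

        /* the payload is ignored by the remote, a NULL message suffices */
        ret = mbox_send_message(chan, NULL);
        if (ret < 0) {
                dev_err(dev, "mbox_send_message() failed: %d\n", ret);
                return ret;
        }

        mbox_client_txdone(chan, 0);
        return 0;
}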
diff --git a/drivers/soc/versatile/Kconfig b/drivers/soc/versatile/Kconfig
index c3792c0a84ac..7bbf54a8d879 100644
--- a/drivers/soc/versatile/Kconfig
+++ b/drivers/soc/versatile/Kconfig
@@ -4,7 +4,7 @@
#
config SOC_INTEGRATOR_CM
bool "SoC bus device for the ARM Integrator platform core modules"
- depends on ARCH_INTEGRATOR
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
select SOC_BUS
help
Include support for the SoC bus on the ARM Integrator platform
@@ -13,7 +13,7 @@ config SOC_INTEGRATOR_CM
config SOC_REALVIEW
bool "SoC bus device for the ARM RealView platforms"
- depends on ARCH_REALVIEW
+ depends on ARCH_REALVIEW || COMPILE_TEST
select SOC_BUS
help
Include support for the SoC bus on the ARM RealView platforms
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
index bab4ad87aa75..d5099a3386b4 100644
--- a/drivers/soc/versatile/soc-integrator.c
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -113,6 +113,7 @@ static int __init integrator_soc_init(void)
return -ENODEV;
syscon_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
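The one-line integrator fix above releases the node reference as soon as the regmap lookup is done, since syscon_node_to_regmap() does not need the caller to keep holding it. The same pattern in isolation, with an illustrative compatible string:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>

static struct regmap *demo_get_syscon(void)
{
        struct device_node *np;
        struct regmap *map;

        np = of_find_compatible_node(NULL, NULL, "vendor,example-syscon");
        if (!np)
                return ERR_PTR(-ENODEV);

        map = syscon_node_to_regmap(np);
        of_node_put(np);        /* drop our reference right after the lookup */

        return map;
}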
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index c6876d232d8f..cf91abe07d38 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -4,6 +4,7 @@
*
* Author: Linus Walleij <linus.walleij@linaro.org>
*/
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -81,6 +82,13 @@ static struct attribute *realview_attrs[] = {
ATTRIBUTE_GROUPS(realview);
+static void realview_soc_socdev_release(void *data)
+{
+ struct soc_device *soc_dev = data;
+
+ soc_device_unregister(soc_dev);
+}
+
static int realview_soc_probe(struct platform_device *pdev)
{
struct regmap *syscon_regmap;
@@ -93,7 +101,7 @@ static int realview_soc_probe(struct platform_device *pdev)
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -106,10 +114,14 @@ static int realview_soc_probe(struct platform_device *pdev)
soc_dev_attr->family = "Versatile";
soc_dev_attr->custom_attr_group = realview_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr);
+ if (IS_ERR(soc_dev))
return -ENODEV;
- }
+
+ ret = devm_add_action_or_reset(&pdev->dev, realview_soc_socdev_release,
+ soc_dev);
+ if (ret)
+ return ret;
+
ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET,
&realview_coreid);
if (ret)
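The realview probe now ties the soc_device lifetime to the platform device through devm_add_action_or_reset(), and the attribute struct becomes devm-allocated, so the manual kfree() on the error path goes away. A condensed sketch of that registration step (attribute setup omitted):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/sys_soc.h>

static void demo_socdev_release(void *data)
{
        soc_device_unregister(data);
}

static int demo_register_soc(struct device *dev,
                             struct soc_device_attribute *attr)
{
        struct soc_device *soc_dev;

        soc_dev = soc_device_register(attr);
        if (IS_ERR(soc_dev))
                return PTR_ERR(soc_dev);

        /* unregistered automatically when dev is unbound */
        return devm_add_action_or_reset(dev, demo_socdev_release, soc_dev);
}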
diff --git a/drivers/soc/vt8500/Kconfig b/drivers/soc/vt8500/Kconfig
new file mode 100644
index 000000000000..b4cc0ba1128b
--- /dev/null
+++ b/drivers/soc/vt8500/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_VT8500 || COMPILE_TEST
+
+menu "VIA/WonderMedia SoC drivers"
+
+config WMT_SOCINFO
+ bool "VIA/WonderMedia SoC Information driver"
+ default ARCH_VT8500
+ select SOC_BUS
+ help
+ Say yes to support decoding of VIA/WonderMedia system configuration
+ register information. This currently includes just the chip ID register
+ which helps identify the exact hardware revision of the SoC the kernel
+ is running on (to know if any revision-specific quirks are required).
+
+endmenu
+
+endif
diff --git a/drivers/soc/vt8500/Makefile b/drivers/soc/vt8500/Makefile
new file mode 100644
index 000000000000..05964c5f2890
--- /dev/null
+++ b/drivers/soc/vt8500/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_WMT_SOCINFO) += wmt-socinfo.o
diff --git a/drivers/soc/vt8500/wmt-socinfo.c b/drivers/soc/vt8500/wmt-socinfo.c
new file mode 100644
index 000000000000..461f8c1ae56e
--- /dev/null
+++ b/drivers/soc/vt8500/wmt-socinfo.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2025 Alexey Charkov <alchark@gmail.com>
+ * Based on aspeed-socinfo.c
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+
+static const struct {
+ const char *name;
+ const u32 id;
+} chip_id_table[] = {
+ /* VIA */
+ { "VT8420", 0x3300 },
+ { "VT8430", 0x3357 },
+ { "VT8500", 0x3400 },
+
+ /* WonderMedia */
+ { "WM8425", 0x3429 },
+ { "WM8435", 0x3437 },
+ { "WM8440", 0x3451 },
+ { "WM8505", 0x3426 },
+ { "WM8650", 0x3465 },
+ { "WM8750", 0x3445 },
+ { "WM8850", 0x3481 },
+ { "WM8880", 0x3498 },
+};
+
+static const char *sccid_to_name(u32 sccid)
+{
+ u32 id = sccid >> 16;
+ unsigned int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(chip_id_table) ; ++i) {
+ if (chip_id_table[i].id == id)
+ return chip_id_table[i].name;
+ }
+
+ return "Unknown";
+}
+
+static int wmt_socinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ char letter, digit;
+ void __iomem *reg;
+ u32 sccid;
+
+ reg = devm_of_iomap(&pdev->dev, np, 0, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ sccid = readl(reg);
+
+ attrs = devm_kzalloc(&pdev->dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ /*
+ * Machine: VIA APC Rock
+ * Family: WM8850
+ * Revision: A2
+ * SoC ID: raw silicon revision id (34810103 in hexadecimal)
+ */
+
+ attrs->family = sccid_to_name(sccid);
+
+ letter = (sccid >> 8) & 0xf;
+ letter = (letter - 1) + 'A';
+ digit = sccid & 0xff;
+ digit = (digit - 1) + '0';
+ attrs->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%c%c", letter, digit);
+
+ attrs->soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%08x", sccid);
+
+ if (!attrs->revision || !attrs->soc_id)
+ return -ENOMEM;
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ dev_info(&pdev->dev,
+ "VIA/WonderMedia %s rev %s (%s)\n",
+ attrs->family,
+ attrs->revision,
+ attrs->soc_id);
+
+ platform_set_drvdata(pdev, soc_dev);
+ return 0;
+}
+
+static void wmt_socinfo_remove(struct platform_device *pdev)
+{
+ struct soc_device *soc_dev = platform_get_drvdata(pdev);
+
+ soc_device_unregister(soc_dev);
+}
+
+static const struct of_device_id wmt_socinfo_ids[] = {
+ { .compatible = "via,vt8500-scc-id" },
+ { /* Sentinel */ },
+};
+
+static struct platform_driver wmt_socinfo = {
+ .probe = wmt_socinfo_probe,
+ .remove = wmt_socinfo_remove,
+ .driver = {
+ .name = "wmt-socinfo",
+ .of_match_table = wmt_socinfo_ids,
+ },
+};
+module_platform_driver(wmt_socinfo);
+
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
+MODULE_DESCRIPTION("VIA/WonderMedia socinfo driver");
+MODULE_LICENSE("GPL");
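Working through the decode in wmt_socinfo_probe() for the sample value from the comment, sccid = 0x34810103: the upper 16 bits (0x3481) match WM8850 in chip_id_table, bits [11:8] give 0x1 and hence 'A', and the low byte 0x03 gives '2', so the driver reports "WM8850 rev A2 (34810103)". A tiny standalone check of the same arithmetic:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_decode_sccid(void)
{
        u32 sccid = 0x34810103;
        char letter = (((sccid >> 8) & 0xf) - 1) + 'A';   /* 0x1 -> 'A' */
        char digit = ((sccid & 0xff) - 1) + '0';          /* 0x03 -> '2' */

        pr_info("chip 0x%04x rev %c%c\n", sccid >> 16, letter, digit);
}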
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 8a755a5c8836..49d69d6e18fe 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -16,15 +16,6 @@ config ZYNQMP_POWER
If in doubt, say N.
-config ZYNQMP_PM_DOMAINS
- bool "Enable Zynq MPSoC generic PM domains"
- default y
- depends on PM && ZYNQMP_FIRMWARE
- select PM_GENERIC_DOMAINS
- help
- Say yes to enable device power management through PM domains
- If in doubt, say N.
-
config XLNX_EVENT_MANAGER
bool "Enable Xilinx Event Management Driver"
depends on ZYNQMP_FIRMWARE
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index 86a048a10a13..6fdf4d14b7e7 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -3,6 +3,7 @@
* Xilinx Event Management Driver
*
* Copyright (C) 2021 Xilinx, Inc.
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
*
* Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
*/
@@ -19,7 +20,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
+static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);
static int virq_sgi;
static int event_manager_availability = -EACCES;
@@ -35,7 +36,6 @@ static int event_manager_availability = -EACCES;
#define MAX_BITS (32U) /* Number of bits available for error mask */
-#define FIRMWARE_VERSION_MASK (0xFFFFU)
#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
@@ -77,11 +77,26 @@ struct registered_event_data {
static bool xlnx_is_error_event(const u32 node_id)
{
- if (node_id == EVENT_ERROR_PMC_ERR1 ||
- node_id == EVENT_ERROR_PMC_ERR2 ||
- node_id == EVENT_ERROR_PSM_ERR1 ||
- node_id == EVENT_ERROR_PSM_ERR2)
- return true;
+ u32 pm_family_code;
+
+ zynqmp_pm_get_family_info(&pm_family_code);
+
+ if (pm_family_code == PM_VERSAL_FAMILY_CODE) {
+ if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 ||
+ node_id == VERSAL_EVENT_ERROR_PMC_ERR2 ||
+ node_id == VERSAL_EVENT_ERROR_PSM_ERR1 ||
+ node_id == VERSAL_EVENT_ERROR_PSM_ERR2)
+ return true;
+ } else if (pm_family_code == PM_VERSAL_NET_FAMILY_CODE) {
+ if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4)
+ return true;
+ }
return false;
}
@@ -173,8 +188,10 @@ static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
INIT_LIST_HEAD(&eve_data->cb_list_head);
cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
- if (!cb_data)
+ if (!cb_data) {
+ kfree(eve_data);
return -ENOMEM;
+ }
cb_data->eve_cb = cb_fun;
cb_data->agent_data = data;
@@ -477,13 +494,13 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
}
}
if (!is_callback_found)
- pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
+ pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n",
payload[1], payload[2]);
}
static void xlnx_get_event_callback_data(u32 *buf)
{
- zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
}
static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
@@ -555,7 +572,6 @@ static void xlnx_disable_percpu_irq(void *data)
static int xlnx_event_init_sgi(struct platform_device *pdev)
{
int ret = 0;
- int cpu = smp_processor_id();
/*
* IRQ related structures are used for the following:
* for each SGI interrupt ensure its mapped by GIC IRQ domain
@@ -592,9 +608,9 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
sgi_fwspec.param[0] = sgi_num;
virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
- per_cpu(cpu_number1, cpu) = cpu;
ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
- &cpu_number1);
+ &dummy_cpu_number);
+
WARN_ON(ret);
if (ret) {
irq_dispose_mapping(virq_sgi);
@@ -609,16 +625,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
- int cpu = smp_processor_id();
-
- per_cpu(cpu_number1, cpu) = cpu;
-
cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
- free_percpu_irq(virq_sgi, &cpu_number1);
+ free_percpu_irq(virq_sgi, &dummy_cpu_number);
irq_dispose_mapping(virq_sgi);
}
@@ -653,7 +665,11 @@ static int xlnx_event_manager_probe(struct platform_device *pdev)
ret = zynqmp_pm_register_sgi(sgi_num, 0);
if (ret) {
- dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
+ if (ret == -EOPNOTSUPP)
+ dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n");
+ else
+ dev_err(&pdev->dev, "SGI %d registration failed, err %d\n", sgi_num, ret);
+
xlnx_event_cleanup_sgi(pdev);
return ret;
}
@@ -695,7 +711,7 @@ static void xlnx_event_manager_remove(struct platform_device *pdev)
static struct platform_driver xlnx_event_manager_driver = {
.probe = xlnx_event_manager_probe,
- .remove_new = xlnx_event_manager_remove,
+ .remove = xlnx_event_manager_remove,
.driver = {
.name = "xlnx_event_manager",
},
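The rename to dummy_cpu_number reflects that the per-CPU variable only exists to give request_percpu_irq() a valid per-CPU dev_id; its contents are never read, so the per-CPU writes in init and cleanup were dropped. A bare-bones sketch of the same SGI request pattern, with illustrative names:

#include <linux/interrupt.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_cookie);

static irqreturn_t demo_sgi_handler(int irq, void *dev_id)
{
        /* dev_id points at this CPU's demo_cookie instance */
        return IRQ_HANDLED;
}

static int demo_request_sgi(unsigned int virq)
{
        return request_percpu_irq(virq, demo_sgi_handler, "demo_sgi",
                                  &demo_cookie);
}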
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index c2c819701eec..9b7b2858b22a 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -30,9 +30,27 @@ struct zynqmp_pm_work_struct {
u32 args[CB_ARG_CNT];
};
-static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+/**
+ * struct zynqmp_pm_event_info - event related information
+ * @cb_fun: Function pointer to store the callback function.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @wake: Flag specifying whether the subsystem should be woken upon
+ * event notification.
+ */
+struct zynqmp_pm_event_info {
+ event_cb_func_t cb_fun;
+ enum pm_api_cb_id cb_type;
+ u32 node_id;
+ u32 event;
+ bool wake;
+};
+
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work, *zynqmp_pm_init_restart_work;
static struct mbox_chan *rx_chan;
-static bool event_registered;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -51,7 +69,20 @@ static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
static void zynqmp_pm_get_callback_data(u32 *buf)
{
- zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
+}
+
+static void subsystem_restart_event_callback(const u32 *payload, void *data)
+{
+ /* First element is callback API ID, others are callback arguments */
+ if (work_pending(&zynqmp_pm_init_restart_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_restart_work->args, &payload[0],
+ sizeof(zynqmp_pm_init_restart_work->args));
+
+ queue_work(system_unbound_wq, &zynqmp_pm_init_restart_work->callback_work);
}
static void suspend_event_callback(const u32 *payload, void *data)
@@ -83,9 +114,11 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data)
pm_suspend(PM_SUSPEND_MEM);
break;
default:
- pr_err("%s Unsupported InitSuspendCb reason "
- "code %d\n", __func__, payload[1]);
+ pr_err("%s Unsupported InitSuspendCb reason code %d\n",
+ __func__, payload[1]);
}
+ } else {
+ pr_err("%s() Unsupported Callback %d\n", __func__, payload[0]);
}
return IRQ_HANDLED;
@@ -118,6 +151,37 @@ static void ipi_receive_callback(struct mbox_client *cl, void *data)
}
/**
+ * zynqmp_pm_subsystem_restart_work_fn - Initiate Subsystem restart
+ * @work: Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_subsystem_restart_work_fn(struct work_struct *work)
+{
+ int ret;
+ struct zynqmp_pm_work_struct *pm_work = container_of(work, struct zynqmp_pm_work_struct,
+ callback_work);
+
+ /* First element is callback API ID, others are callback arguments */
+ if (pm_work->args[0] == PM_NOTIFY_CB) {
+ if (pm_work->args[2] == EVENT_SUBSYSTEM_RESTART) {
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM);
+ if (ret) {
+ pr_err("unable to set shutdown scope\n");
+ return;
+ }
+
+ kernel_restart(NULL);
+ } else {
+ pr_err("%s Unsupported Event - %d\n", __func__, pm_work->args[2]);
+ }
+ } else {
+ pr_err("%s() Unsupported Callback %d\n", __func__, pm_work->args[0]);
+ }
+}
+
+/**
* zynqmp_pm_init_suspend_work_fn - Initialize suspend
* @work: Pointer to work_struct
*
@@ -182,13 +246,51 @@ static ssize_t suspend_mode_store(struct device *dev,
static DEVICE_ATTR_RW(suspend_mode);
+static void unregister_event(struct device *dev, void *res)
+{
+ struct zynqmp_pm_event_info *event_info = res;
+
+ xlnx_unregister_event(event_info->cb_type, event_info->node_id,
+ event_info->event, event_info->cb_fun, NULL);
+}
+
+static int register_event(struct device *dev, const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake, event_cb_func_t cb_fun)
+{
+ int ret;
+ struct zynqmp_pm_event_info *event_info;
+
+ event_info = devres_alloc(unregister_event, sizeof(struct zynqmp_pm_event_info),
+ GFP_KERNEL);
+ if (!event_info)
+ return -ENOMEM;
+
+ event_info->cb_type = cb_type;
+ event_info->node_id = node_id;
+ event_info->event = event;
+ event_info->wake = wake;
+ event_info->cb_fun = cb_fun;
+
+ ret = xlnx_register_event(event_info->cb_type, event_info->node_id,
+ event_info->event, event_info->wake, event_info->cb_fun, NULL);
+ if (ret) {
+ devres_free(event_info);
+ return ret;
+ }
+
+ devres_add(dev, event_info);
+ return 0;
+}
+
static int zynqmp_pm_probe(struct platform_device *pdev)
{
int ret, irq;
- u32 pm_api_version;
+ u32 pm_api_version, pm_family_code, node_id;
struct mbox_client *client;
- zynqmp_pm_get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
+ if (ret)
+ return ret;
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
@@ -201,21 +303,45 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
* is not available to use) or -ENODEV(Xilinx Event Manager not compiled),
* then use ipi-mailbox or interrupt method.
*/
- ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false,
- suspend_event_callback, NULL);
+ ret = register_event(&pdev->dev, PM_INIT_SUSPEND_CB, 0, 0, false,
+ suspend_event_callback);
if (!ret) {
zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev,
sizeof(struct zynqmp_pm_work_struct),
GFP_KERNEL);
- if (!zynqmp_pm_init_suspend_work) {
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
- suspend_event_callback, NULL);
+ if (!zynqmp_pm_init_suspend_work)
return -ENOMEM;
- }
- event_registered = true;
INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
zynqmp_pm_init_suspend_work_fn);
+
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
+ if (ret < 0)
+ return ret;
+
+ if (pm_family_code == PM_VERSAL_NET_FAMILY_CODE)
+ node_id = PM_DEV_ACPU_0_0;
+ else if (pm_family_code == PM_VERSAL_FAMILY_CODE)
+ node_id = PM_DEV_ACPU_0;
+ else
+ return -ENODEV;
+
+ ret = register_event(&pdev->dev, PM_NOTIFY_CB, node_id, EVENT_SUBSYSTEM_RESTART,
+ false, subsystem_restart_event_callback);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n",
+ ret);
+ return ret;
+ }
+
+ zynqmp_pm_init_restart_work = devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_restart_work)
+ return -ENOMEM;
+
+ INIT_WORK(&zynqmp_pm_init_restart_work->callback_work,
+ zynqmp_pm_subsystem_restart_work_fn);
} else if (ret != -EACCES && ret != -ENODEV) {
dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret);
return ret;
@@ -252,8 +378,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
dev_name(&pdev->dev),
&pdev->dev);
if (ret) {
- dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
- "failed with %d\n", irq, ret);
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed with %d\n",
+ irq, ret);
return ret;
}
} else {
@@ -262,29 +388,18 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
- if (ret) {
- if (event_registered) {
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback,
- NULL);
- event_registered = false;
- }
- dev_err(&pdev->dev, "unable to create sysfs interface\n");
+ if (ret)
return ret;
- }
return 0;
}
-static int zynqmp_pm_remove(struct platform_device *pdev)
+static void zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
- if (event_registered)
- xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL);
if (!rx_chan)
mbox_free_channel(rx_chan);
-
- return 0;
}
static const struct of_device_id pm_of_match[] = {