author		Linus Torvalds <torvalds@linux-foundation.org>	2018-02-01 16:35:31 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-01 16:35:31 -0800
commit		fe53d1443a146326b49d57fe6336b5c2a725223f (patch)
tree		0bb6de8614bec52f025a0608910e80d6e9315245 /drivers/soc
parent		adbc128fa8b4e9ecfdd11d5dd0a7d9845c6ea510 (diff)
parent		796543a64ebffdb638a22f428c4dadd037e34866 (diff)
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "A number of new drivers get added this time, along with many
  low-priority bugfixes. The most interesting changes by subsystem are:

  bus drivers:
   - Updates to the Broadcom bus interface driver to support newer SoC types
   - The TI OMAP sysc driver now supports updated DT bindings

  memory controllers:
   - A new driver for Tegra186 gets added
   - A new driver for the ti-emif sram, to allow relocating
     suspend/resume handlers there

  SoC specific:
   - A new driver for Qualcomm QMI, the interface to the modem on MSM SoCs
   - A new driver for power domains on the actions S700 SoC
   - A driver for the Xilinx Zynq VCU logicoreIP

  reset controllers:
   - A new driver for Amlogic Meson-AGX
   - various bug fixes

  tee subsystem:
   - A new user interface got added to enable asynchronous communication
     with the TEE supplicant.
   - A new method of using user space memory for communication with the
     TEE is added"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (84 commits)
  of: platform: fix OF node refcount leak
  soc: fsl: guts: Add a NULL check for devm_kasprintf()
  bus: ti-sysc: Fix smartreflex sysc mask
  psci: add CPU_IDLE dependency
  soc: xilinx: Fix Kconfig alignment
  soc: xilinx: xlnx_vcu: Use bitwise & rather than logical && on clkoutdiv
  soc: xilinx: xlnx_vcu: Depends on HAS_IOMEM for xlnx_vcu
  soc: bcm: brcmstb: Be multi-platform compatible
  soc: brcmstb: biuctrl: exit without warning on non brcmstb platforms
  Revert "soc: brcmstb: Only register SoC device on STB platforms"
  bus: omap: add MODULE_LICENSE tags
  soc: brcmstb: Only register SoC device on STB platforms
  tee: shm: Potential NULL dereference calling tee_shm_register()
  soc: xilinx: xlnx_vcu: Add Xilinx ZYNQMP VCU logicoreIP init driver
  dt-bindings: soc: xilinx: Add DT bindings to xlnx_vcu driver
  soc: xilinx: Create folder structure for soc specific drivers
  of: platform: populate /firmware/ node from of_platform_default_populate_init()
  soc: samsung: Add SPDX license identifiers
  soc: qcom: smp2p: Use common error handling code in qcom_smp2p_probe()
  tee: shm: don't put_page on null shm->pages
  ...
Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/Kconfig                   |    1
-rw-r--r--  drivers/soc/Makefile                  |    1
-rw-r--r--  drivers/soc/actions/owl-sps.c         |   42
-rw-r--r--  drivers/soc/bcm/brcmstb/biuctrl.c     |  192
-rw-r--r--  drivers/soc/bcm/brcmstb/common.c      |   43
-rw-r--r--  drivers/soc/fsl/guts.c                |    8
-rw-r--r--  drivers/soc/imx/gpc.c                 |   16
-rw-r--r--  drivers/soc/qcom/Kconfig              |   10
-rw-r--r--  drivers/soc/qcom/Makefile             |    2
-rw-r--r--  drivers/soc/qcom/qmi_encdec.c         |  816
-rw-r--r--  drivers/soc/qcom/qmi_interface.c      |  848
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c          |    4
-rw-r--r--  drivers/soc/qcom/smp2p.c              |   55
-rw-r--r--  drivers/soc/qcom/smsm.c               |    6
-rw-r--r--  drivers/soc/samsung/Kconfig           |    1
-rw-r--r--  drivers/soc/samsung/Makefile          |    1
-rw-r--r--  drivers/soc/samsung/exynos-pmu.c      |   16
-rw-r--r--  drivers/soc/samsung/exynos-pmu.h      |    5
-rw-r--r--  drivers/soc/samsung/exynos3250-pmu.c  |   16
-rw-r--r--  drivers/soc/samsung/exynos4-pmu.c     |   16
-rw-r--r--  drivers/soc/samsung/exynos5250-pmu.c  |   16
-rw-r--r--  drivers/soc/samsung/exynos5420-pmu.c  |   16
-rw-r--r--  drivers/soc/samsung/pm_domains.c      |   24
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c      |    4
-rw-r--r--  drivers/soc/xilinx/Kconfig            |   20
-rw-r--r--  drivers/soc/xilinx/Makefile           |    2
-rw-r--r--  drivers/soc/xilinx/xlnx_vcu.c         |  630
27 files changed, 2689 insertions, 122 deletions
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index fc9e98047421..c07b4a85253f 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -16,6 +16,7 @@ source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
+source "drivers/soc/xilinx/Kconfig"
source "drivers/soc/zte/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 342768df3530..40523577bdaa 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -23,4 +23,5 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
+obj-y += xilinx/
obj-$(CONFIG_ARCH_ZX) += zte/
diff --git a/drivers/soc/actions/owl-sps.c b/drivers/soc/actions/owl-sps.c
index 875225bfa21c..8477f0f18e24 100644
--- a/drivers/soc/actions/owl-sps.c
+++ b/drivers/soc/actions/owl-sps.c
@@ -17,6 +17,7 @@
#include <linux/pm_domain.h>
#include <linux/soc/actions/owl-sps.h>
#include <dt-bindings/power/owl-s500-powergate.h>
+#include <dt-bindings/power/owl-s700-powergate.h>
struct owl_sps_domain_info {
const char *name;
@@ -203,8 +204,49 @@ static const struct owl_sps_info s500_sps_info = {
.domains = s500_sps_domains,
};
+static const struct owl_sps_domain_info s700_sps_domains[] = {
+ [S700_PD_VDE] = {
+ .name = "VDE",
+ .pwr_bit = 0,
+ },
+ [S700_PD_VCE_SI] = {
+ .name = "VCE_SI",
+ .pwr_bit = 1,
+ },
+ [S700_PD_USB2_1] = {
+ .name = "USB2_1",
+ .pwr_bit = 2,
+ },
+ [S700_PD_HDE] = {
+ .name = "HDE",
+ .pwr_bit = 7,
+ },
+ [S700_PD_DMA] = {
+ .name = "DMA",
+ .pwr_bit = 8,
+ },
+ [S700_PD_DS] = {
+ .name = "DS",
+ .pwr_bit = 9,
+ },
+ [S700_PD_USB3] = {
+ .name = "USB3",
+ .pwr_bit = 10,
+ },
+ [S700_PD_USB2_0] = {
+ .name = "USB2_0",
+ .pwr_bit = 11,
+ },
+};
+
+static const struct owl_sps_info s700_sps_info = {
+ .num_domains = ARRAY_SIZE(s700_sps_domains),
+ .domains = s700_sps_domains,
+};
+
static const struct of_device_id owl_sps_of_matches[] = {
{ .compatible = "actions,s500-sps", .data = &s500_sps_info },
+ { .compatible = "actions,s700-sps", .data = &s700_sps_info },
{ }
};
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 3c39415d484f..6d89ebf13b8a 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -21,11 +21,70 @@
#include <linux/syscore_ops.h>
#include <linux/soc/brcmstb/brcmstb.h>
-#define CPU_CREDIT_REG_OFFSET 0x184
#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
+#define CPU_CREDIT_REG_MCPx_READ_CRED_MASK 0xf
+#define CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK 0xf
+#define CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(x) ((x) * 8)
+#define CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(x) (((x) * 8) + 4)
+
+#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(x) ((x) * 8)
+#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK 0xff
+
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK 0xf
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK 0xf
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT 4
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE BIT(8)
static void __iomem *cpubiuctrl_base;
static bool mcp_wr_pairing_en;
+static const int *cpubiuctrl_regs;
+
+static inline u32 cbc_readl(int reg)
+{
+ int offset = cpubiuctrl_regs[reg];
+
+ if (offset == -1)
+ return (u32)-1;
+
+ return readl_relaxed(cpubiuctrl_base + offset);
+}
+
+static inline void cbc_writel(u32 val, int reg)
+{
+ int offset = cpubiuctrl_regs[reg];
+
+ if (offset == -1)
+ return;
+
+ writel_relaxed(val, cpubiuctrl_base + offset);
+}
+
+enum cpubiuctrl_regs {
+ CPU_CREDIT_REG = 0,
+ CPU_MCP_FLOW_REG,
+ CPU_WRITEBACK_CTRL_REG
+};
+
+static const int b15_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x184,
+ [CPU_MCP_FLOW_REG] = -1,
+ [CPU_WRITEBACK_CTRL_REG] = -1,
+};
+
+/* Odd cases, e.g: 7260 */
+static const int b53_cpubiuctrl_no_wb_regs[] = {
+ [CPU_CREDIT_REG] = 0x0b0,
+ [CPU_MCP_FLOW_REG] = 0x0b4,
+ [CPU_WRITEBACK_CTRL_REG] = -1,
+};
+
+static const int b53_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x0b0,
+ [CPU_MCP_FLOW_REG] = 0x0b4,
+ [CPU_WRITEBACK_CTRL_REG] = 0x22c,
+};
+
+#define NUM_CPU_BIUCTRL_REGS 3
static int __init mcp_write_pairing_set(void)
{
@@ -34,15 +93,15 @@ static int __init mcp_write_pairing_set(void)
if (!cpubiuctrl_base)
return -1;
- creds = readl_relaxed(cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ creds = cbc_readl(CPU_CREDIT_REG);
if (mcp_wr_pairing_en) {
pr_info("MCP: Enabling write pairing\n");
- writel_relaxed(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
- cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ cbc_writel(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ CPU_CREDIT_REG);
} else if (creds & CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK) {
pr_info("MCP: Disabling write pairing\n");
- writel_relaxed(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
- cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ cbc_writel(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ CPU_CREDIT_REG);
} else {
pr_info("MCP: Write pairing already disabled\n");
}
@@ -50,17 +109,64 @@ static int __init mcp_write_pairing_set(void)
return 0;
}
-static int __init setup_hifcpubiuctrl_regs(void)
+static const u32 b53_mach_compat[] = {
+ 0x7268,
+ 0x7271,
+ 0x7278,
+};
+
+static void __init mcp_b53_set(void)
{
- struct device_node *np;
- int ret = 0;
+ unsigned int i;
+ u32 reg;
- np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
- if (!np) {
- pr_err("missing BIU control node\n");
- return -ENODEV;
+ reg = brcmstb_get_family_id();
+
+ for (i = 0; i < ARRAY_SIZE(b53_mach_compat); i++) {
+ if (BRCM_ID(reg) == b53_mach_compat[i])
+ break;
}
+ if (i == ARRAY_SIZE(b53_mach_compat))
+ return;
+
+ /* Set all 3 MCP interfaces to 8 credits */
+ reg = cbc_readl(CPU_CREDIT_REG);
+ for (i = 0; i < 3; i++) {
+ reg &= ~(CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK <<
+ CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i));
+ reg &= ~(CPU_CREDIT_REG_MCPx_READ_CRED_MASK <<
+ CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i));
+ reg |= 8 << CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i);
+ reg |= 8 << CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i);
+ }
+ cbc_writel(reg, CPU_CREDIT_REG);
+
+ /* Max out the number of in-flight Jwords reads on the MCP interface */
+ reg = cbc_readl(CPU_MCP_FLOW_REG);
+ for (i = 0; i < 3; i++)
+ reg |= CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK <<
+ CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(i);
+ cbc_writel(reg, CPU_MCP_FLOW_REG);
+
+ /* Enable writeback throttling, set timeout to 128 cycles, 256 cycles
+ * threshold
+ */
+ reg = cbc_readl(CPU_WRITEBACK_CTRL_REG);
+ reg |= CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE;
+ reg &= ~CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK;
+ reg &= ~(CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK <<
+ CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT);
+ reg |= 8;
+ reg |= 7 << CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT;
+ cbc_writel(reg, CPU_WRITEBACK_CTRL_REG);
+}
+
+static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
+{
+ struct device_node *cpu_dn;
+ int ret = 0;
+
cpubiuctrl_base = of_iomap(np, 0);
if (!cpubiuctrl_base) {
pr_err("failed to remap BIU control base\n");
@@ -69,27 +175,56 @@ static int __init setup_hifcpubiuctrl_regs(void)
}
mcp_wr_pairing_en = of_property_read_bool(np, "brcm,write-pairing");
+
+ cpu_dn = of_get_cpu_node(0, NULL);
+ if (!cpu_dn) {
+ pr_err("failed to obtain CPU device node\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
+ cpubiuctrl_regs = b15_cpubiuctrl_regs;
+ else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
+ cpubiuctrl_regs = b53_cpubiuctrl_regs;
+ else {
+ pr_err("unsupported CPU\n");
+ ret = -EINVAL;
+ }
+ of_node_put(cpu_dn);
+
+ if (BRCM_ID(brcmstb_get_family_id()) == 0x7260)
+ cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
of_node_put(np);
return ret;
}
#ifdef CONFIG_PM_SLEEP
-static u32 cpu_credit_reg_dump; /* for save/restore */
+static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS];
static int brcmstb_cpu_credit_reg_suspend(void)
{
- if (cpubiuctrl_base)
- cpu_credit_reg_dump =
- readl_relaxed(cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ unsigned int i;
+
+ if (!cpubiuctrl_base)
+ return 0;
+
+ for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
+ cpubiuctrl_reg_save[i] = cbc_readl(i);
+
return 0;
}
static void brcmstb_cpu_credit_reg_resume(void)
{
- if (cpubiuctrl_base)
- writel_relaxed(cpu_credit_reg_dump,
- cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ unsigned int i;
+
+ if (!cpubiuctrl_base)
+ return;
+
+ for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
+ cbc_writel(cpubiuctrl_reg_save[i], i);
}
static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
@@ -99,19 +234,30 @@ static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
#endif
-void __init brcmstb_biuctrl_init(void)
+static int __init brcmstb_biuctrl_init(void)
{
+ struct device_node *np;
int ret;
- setup_hifcpubiuctrl_regs();
+ /* We might be running on a multi-platform kernel, don't make this a
+ * fatal error, just bail out early
+ */
+ np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
+ if (!np)
+ return 0;
+
+ setup_hifcpubiuctrl_regs(np);
ret = mcp_write_pairing_set();
if (ret) {
pr_err("MCP: Unable to disable write pairing!\n");
- return;
+ return ret;
}
+ mcp_b53_set();
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
+ return 0;
}
+early_initcall(brcmstb_biuctrl_init);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index a71730da6385..14185451901d 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -66,24 +66,47 @@ static const struct of_device_id sun_top_ctrl_match[] = {
{ }
};
-static int __init brcmstb_soc_device_init(void)
+static int __init brcmstb_soc_device_early_init(void)
{
- struct soc_device_attribute *soc_dev_attr;
- struct soc_device *soc_dev;
struct device_node *sun_top_ctrl;
void __iomem *sun_top_ctrl_base;
int ret = 0;
+ /* We could be on a multi-platform kernel, don't make this fatal but
+ * bail out early
+ */
sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
if (!sun_top_ctrl)
- return -ENODEV;
+ return ret;
sun_top_ctrl_base = of_iomap(sun_top_ctrl, 0);
- if (!sun_top_ctrl_base)
- return -ENODEV;
+ if (!sun_top_ctrl_base) {
+ ret = -ENODEV;
+ goto out;
+ }
family_id = readl(sun_top_ctrl_base);
product_id = readl(sun_top_ctrl_base + 0x4);
+ iounmap(sun_top_ctrl_base);
+out:
+ of_node_put(sun_top_ctrl);
+ return ret;
+}
+early_initcall(brcmstb_soc_device_early_init);
+
+static int __init brcmstb_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *sun_top_ctrl;
+ struct soc_device *soc_dev;
+ int ret = 0;
+
+ /* We could be on a multi-platform kernel, don't make this fatal but
+ * bail out early
+ */
+ sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+ if (!sun_top_ctrl)
+ return ret;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr) {
@@ -107,14 +130,10 @@ static int __init brcmstb_soc_device_init(void)
kfree(soc_dev_attr->soc_id);
kfree(soc_dev_attr->revision);
kfree(soc_dev_attr);
- ret = -ENODEV;
- goto out;
+ ret = -ENOMEM;
}
-
- return 0;
-
out:
- iounmap(sun_top_ctrl_base);
+ of_node_put(sun_top_ctrl);
return ret;
}
arch_initcall(brcmstb_soc_device_init);
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index d89a6a80c8ef..302e0c8d69d9 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -167,10 +167,16 @@ static int fsl_guts_probe(struct platform_device *pdev)
} else {
soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
}
+ if (!soc_dev_attr.family)
+ return -ENOMEM;
soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
"svr:0x%08x", svr);
+ if (!soc_dev_attr.soc_id)
+ return -ENOMEM;
soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
(svr >> 4) & 0xf, svr & 0xf);
+ if (!soc_dev_attr.revision)
+ return -ENOMEM;
soc_dev = soc_device_register(&soc_dev_attr);
if (IS_ERR(soc_dev))
@@ -214,6 +220,8 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1043a-dcfg", },
{ .compatible = "fsl,ls2080a-dcfg", },
{ .compatible = "fsl,ls1088a-dcfg", },
+ { .compatible = "fsl,ls1012a-dcfg", },
+ { .compatible = "fsl,ls1046a-dcfg", },
{}
};
MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 47e7aa963dbb..53f7275d6cbd 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -273,7 +273,15 @@ static struct imx_pm_domain imx_gpc_domains[] = {
},
.reg_offs = 0x240,
.cntr_pdn_bit = 4,
- }
+ }, {
+ .base = {
+ .name = "PCI",
+ .power_off = imx6_pm_domain_power_off,
+ .power_on = imx6_pm_domain_power_on,
+ },
+ .reg_offs = 0x200,
+ .cntr_pdn_bit = 6,
+ },
};
struct imx_gpc_dt_data {
@@ -296,10 +304,16 @@ static const struct imx_gpc_dt_data imx6sl_dt_data = {
.err009619_present = false,
};
+static const struct imx_gpc_dt_data imx6sx_dt_data = {
+ .num_domains = 4,
+ .err009619_present = false,
+};
+
static const struct of_device_id imx_gpc_dt_ids[] = {
{ .compatible = "fsl,imx6q-gpc", .data = &imx6q_dt_data },
{ .compatible = "fsl,imx6qp-gpc", .data = &imx6qp_dt_data },
{ .compatible = "fsl,imx6sl-gpc", .data = &imx6sl_dt_data },
+ { .compatible = "fsl,imx6sx-gpc", .data = &imx6sx_dt_data },
{ }
};
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b81374bb6713..e050eb83341d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -35,6 +35,15 @@ config QCOM_PM
modes. It interfaces with various system drivers to put the cores in
low power modes.
+config QCOM_QMI_HELPERS
+ tristate
+ depends on ARCH_QCOM
+ help
+ Helper library for handling QMI encoded messages. QMI encoded
+ messages are used in communication between the majority of QRTR
+ clients and these helpers provide the common functionality needed for
+ doing this from a kernel driver.
+
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
depends on ARCH_QCOM
@@ -75,6 +84,7 @@ config QCOM_SMEM_STATE
config QCOM_SMP2P
tristate "Qualcomm Shared Memory Point to Point support"
+ depends on MAILBOX
depends on QCOM_SMEM
select QCOM_SMEM_STATE
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 40c56f67e94a..dcebf2814e6d 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,6 +3,8 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
new file mode 100644
index 000000000000..3aaab71d1b2c
--- /dev/null
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \
+ *p_dst++ = type; \
+ *p_dst++ = ((u8)((length) & 0xFF)); \
+ *p_dst++ = ((u8)(((length) >> 8) & 0xFF)); \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \
+ *p_type = (u8)*p_src++; \
+ *p_length = (u8)*p_src++; \
+ *p_length |= ((u8)*p_src) << 8; \
+} while (0)
+
+#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \
+ encoded_bytes, tlv_len, encode_tlv, rc) \
+do { \
+ buf_dst = (u8 *)buf_dst + rc; \
+ encoded_bytes += rc; \
+ tlv_len += rc; \
+ temp_si = temp_si + 1; \
+ encode_tlv = 1; \
+} while (0)
+
+#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \
+do { \
+ buf_src = (u8 *)buf_src + rc; \
+ decoded_bytes += rc; \
+} while (0)
+
+#define TLV_LEN_SIZE sizeof(u16)
+#define TLV_TYPE_SIZE sizeof(u8)
+#define OPTIONAL_TLV_TYPE_START 0x10
+
+static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level);
+
+static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len, int dec_level);
+
+/**
+ * skip_to_next_elem() - Skip to next element in the structure to be encoded
+ * @ei_array: Struct info describing the element to be skipped.
+ * @level: Depth level of encoding/decoding to identify nested structures.
+ *
+ * This function is used while encoding optional elements. If the flag
+ * corresponding to an optional element is not set, then encoding the
+ * optional element can be skipped. This function can be used to perform
+ * that operation.
+ *
+ * Return: struct info of the next element that can be encoded.
+ */
+static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array,
+ int level)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+ u8 tlv_type;
+
+ if (level > 1) {
+ temp_ei = temp_ei + 1;
+ } else {
+ do {
+ tlv_type = temp_ei->tlv_type;
+ temp_ei = temp_ei + 1;
+ } while (tlv_type == temp_ei->tlv_type);
+ }
+
+ return temp_ei;
+}
+
+/**
+ * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * Return: Expected minimum length of the QMI message or 0 on error.
+ */
+static int qmi_calc_min_msg_len(struct qmi_elem_info *ei_array,
+ int level)
+{
+ int min_msg_len = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ if (!ei_array)
+ return min_msg_len;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ /* Optional elements do not count in minimum length */
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ temp_ei = skip_to_next_elem(temp_ei, level);
+ continue;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ min_msg_len += (temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16));
+ temp_ei++;
+ continue;
+ } else if (temp_ei->data_type == QMI_STRUCT) {
+ min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array,
+ (level + 1));
+ temp_ei++;
+ } else if (temp_ei->data_type == QMI_STRING) {
+ if (level > 1)
+ min_msg_len += temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ min_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+ temp_ei++;
+ } else {
+ min_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+ temp_ei++;
+ }
+
+ /*
+ * Type & Length info. not prepended for elements in the
+ * nested structure.
+ */
+ if (level == 1)
+ min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ }
+
+ return min_msg_len;
+}
+
+/**
+ * qmi_encode_basic_elem() - Encodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @elem_size: Size of a single instance of the element to be encoded.
+ *
+ * This function encodes the "elem_len" number of data elements, each of
+ * size "elem_size" bytes from the source buffer "buf_src" and stores the
+ * encoded information in the destination buffer "buf_dst". The elements are
+ * of primary data type which include u8 - u64 or similar. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information.
+ */
+static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_encode_struct_elem() - Encodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the nested structure from the main structure.
+ *
+ * This function encodes the "elem_len" number of struct elements, each of
+ * size "ei_array->elem_size" bytes from the source buffer "buf_src" and
+ * stores the encoded information in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 out_buf_len,
+ int enc_level)
+{
+ int i, rc, encoded_bytes = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len; i++) {
+ rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src,
+ out_buf_len - encoded_bytes, enc_level);
+ if (rc < 0) {
+ pr_err("%s: STRUCT Encode failure\n", __func__);
+ return rc;
+ }
+ buf_dst = buf_dst + rc;
+ buf_src = buf_src + temp_ei->elem_size;
+ encoded_bytes += rc;
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode_string_elem() - Encodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the string element from the main structure.
+ *
+ * This function encodes a string element of maximum length "ei_array->elem_len"
+ * bytes from the source buffer "buf_src" and stores the encoded information in
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_string_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 out_buf_len, int enc_level)
+{
+ int rc;
+ int encoded_bytes = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+
+ string_len = strlen(buf_src);
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ if (string_len > temp_ei->elem_len) {
+ pr_err("%s: String to be encoded is longer - %d > %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -EINVAL;
+ }
+
+ if (enc_level == 1) {
+ if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE >
+ out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ } else {
+ if (string_len + string_len_sz > out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &string_len,
+ 1, string_len_sz);
+ encoded_bytes += rc;
+ }
+
+ rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src,
+ string_len, temp_ei->elem_size);
+ encoded_bytes += rc;
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode() - Core Encode Function
+ * @ei_array: Struct info array describing the structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @in_c_struct: Pointer to the C structure to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Encode level to indicate the depth of the nested structure,
+ * within the main structure, being encoded.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 0;
+ u32 data_len_value = 0, data_len_sz;
+ u8 *buf_dst = (u8 *)out_buf;
+ u8 *tlv_pointer;
+ u32 tlv_len;
+ u8 tlv_type;
+ u32 encoded_bytes = 0;
+ const void *buf_src;
+ int encode_tlv = 0;
+ int rc;
+
+ if (!ei_array)
+ return 0;
+
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ if (enc_level == 1)
+ buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE);
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ buf_src = in_c_struct + temp_ei->offset;
+ tlv_type = temp_ei->tlv_type;
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value <= 0 ||
+ temp_ei->elem_len < data_len_value) {
+ pr_err("%s: Invalid data length\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_OPT_FLAG:
+ rc = qmi_encode_basic_elem(&opt_flag_value, buf_src,
+ 1, sizeof(u8));
+ if (opt_flag_value)
+ temp_ei = temp_ei + 1;
+ else
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ break;
+
+ case QMI_DATA_LEN:
+ memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ /* Check to avoid out of range buffer access */
+ if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE +
+ TLV_TYPE_SIZE) > out_buf_len) {
+ pr_err("%s: Too Small Buffer @DATA_LEN\n",
+ __func__);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+ 1, data_len_sz);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ if (!data_len_value)
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ else
+ encode_tlv = 0;
+ break;
+
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ /* Check to avoid out of range buffer access */
+ if (((data_len_value * temp_ei->elem_size) +
+ encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) >
+ out_buf_len) {
+ pr_err("%s: Too Small Buffer @data_type:%d\n",
+ __func__, temp_ei->data_type);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value,
+ out_buf_len - encoded_bytes,
+ enc_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src,
+ out_buf_len - encoded_bytes,
+ enc_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (encode_tlv && enc_level == 1) {
+ QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+ encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE;
+ encode_tlv = 0;
+ }
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_decode_basic_elem() - Decodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @elem_size: Size of a single instance of the element to be decoded.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "elem_size" bytes from the source buffer "buf_src" and stores
+ * the decoded elements in the destination buffer "buf_dst". The elements are
+ * of primary data type which include u8 - u64 or similar. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements, in bytes.
+ */
+static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_decode_struct_elem() - Decodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this struct element.
+ * @dec_level: Depth of the nested structure from the main structure.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src"
+ * and stores the decoded elements in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 tlv_len,
+ int dec_level)
+{
+ int i, rc, decoded_bytes = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
+ rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src,
+ tlv_len - decoded_bytes, dec_level);
+ if (rc < 0)
+ return rc;
+ buf_src = buf_src + rc;
+ buf_dst = buf_dst + temp_ei->elem_size;
+ decoded_bytes += rc;
+ }
+
+ if ((dec_level <= 2 && decoded_bytes != tlv_len) ||
+ (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) {
+ pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n",
+ __func__, dec_level, decoded_bytes, tlv_len,
+ i, elem_len);
+ return -EFAULT;
+ }
+
+ return decoded_bytes;
+}
+
+/**
+ * qmi_decode_string_elem() - Decodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this string element.
+ * @dec_level: Depth of the string element from the main structure.
+ *
+ * This function decodes the string element of maximum length
+ * "ei_array->elem_len" from the source buffer "buf_src" and puts it into
+ * the destination buffer "buf_dst". This function returns number of bytes
+ * decoded from the input buffer.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 tlv_len, int dec_level)
+{
+ int rc;
+ int decoded_bytes = 0;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ if (dec_level == 1) {
+ string_len = tlv_len;
+ } else {
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&string_len, buf_src,
+ 1, string_len_sz);
+ decoded_bytes += rc;
+ }
+
+ if (string_len > temp_ei->elem_len) {
+ pr_err("%s: String len %d > Max Len %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -ETOOSMALL;
+ } else if (string_len > tlv_len) {
+ pr_err("%s: String len %d > Input Buffer Len %d\n",
+ __func__, string_len, tlv_len);
+ return -EFAULT;
+ }
+
+ rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes,
+ string_len, temp_ei->elem_size);
+ *((char *)buf_dst + string_len) = '\0';
+ decoded_bytes += rc;
+
+ return decoded_bytes;
+}
+
+/**
+ * find_ei() - Find element info corresponding to TLV Type
+ * @ei_array: Struct info array of the message being decoded.
+ * @type: TLV Type of the element being searched.
+ *
+ * Every element that got encoded in the QMI message will have a type
+ * information associated with it. While decoding the QMI message,
+ * this function is used to find the struct info regarding the element
+ * that corresponds to the type being decoded.
+ *
+ * Return: Pointer to struct info, if found
+ */
+static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array,
+ u32 type)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ if (temp_ei->tlv_type == (u8)type)
+ return temp_ei;
+ temp_ei = temp_ei + 1;
+ }
+
+ return NULL;
+}
+
+/**
+ * qmi_decode() - Core Decode Function
+ * @ei_array: Struct info array describing the structure to be decoded.
+ * @out_c_struct: Buffer to hold the decoded C struct
+ * @in_buf: Buffer containing the QMI message to be decoded
+ * @in_buf_len: Length of the QMI message to be decoded
+ * @dec_level: Decode level to indicate the depth of the nested structure,
+ * within the main structure, being decoded
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len,
+ int dec_level)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 1;
+ u32 data_len_value = 0, data_len_sz = 0;
+ u8 *buf_dst = out_c_struct;
+ const u8 *tlv_pointer;
+ u32 tlv_len = 0;
+ u32 tlv_type;
+ u32 decoded_bytes = 0;
+ const void *buf_src = in_buf;
+ int rc;
+
+ while (decoded_bytes < in_buf_len) {
+ if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+ return decoded_bytes;
+
+ if (dec_level == 1) {
+ tlv_pointer = buf_src;
+ QMI_ENCDEC_DECODE_TLV(&tlv_type,
+ &tlv_len, tlv_pointer);
+ buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ temp_ei = find_ei(ei_array, tlv_type);
+ if (!temp_ei && tlv_type < OPTIONAL_TLV_TYPE_START) {
+ pr_err("%s: Inval element info\n", __func__);
+ return -EINVAL;
+ } else if (!temp_ei) {
+ UPDATE_DECODE_VARIABLES(buf_src,
+ decoded_bytes, tlv_len);
+ continue;
+ }
+ } else {
+ /*
+ * No length information for elements in nested
+ * structures. So use remaining decodable buffer space.
+ */
+ tlv_len = in_buf_len - decoded_bytes;
+ }
+
+ buf_dst = out_c_struct + temp_ei->offset;
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ memcpy(buf_dst, &opt_flag_value, sizeof(u8));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+ 1, data_len_sz);
+ memcpy(buf_dst, &data_len_value, sizeof(u32));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ tlv_len -= data_len_sz;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ }
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value > temp_ei->elem_len) {
+ pr_err("%s: Data len %d > max spec %d\n",
+ __func__, data_len_value, temp_ei->elem_len);
+ return -ETOOSMALL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ rc = qmi_decode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value, tlv_len,
+ dec_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src,
+ tlv_len, dec_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+ temp_ei = temp_ei + 1;
+ }
+
+ return decoded_bytes;
+}
+
+/**
+ * qmi_encode_message() - Encode C structure as QMI encoded message
+ * @type: Type of QMI message
+ * @msg_id: Message ID of the message
+ * @len: Passed as max length of the message, updated to actual size
+ * @txn_id: Transaction ID
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to encode
+ *
+ * Return: Buffer with encoded message, or negative ERR_PTR() on error
+ */
+void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
+ unsigned int txn_id, struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_header *hdr;
+ ssize_t msglen = 0;
+ void *msg;
+ int ret;
+
+ /* Check the possibility of a zero length QMI message */
+ if (!c_struct) {
+ ret = qmi_calc_min_msg_len(ei, 1);
+ if (ret) {
+ pr_err("%s: Calc. len %d != 0, but NULL c_struct\n",
+ __func__, ret);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ msg = kzalloc(sizeof(*hdr) + *len, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ /* Encode message, if we have a message */
+ if (c_struct) {
+ msglen = qmi_encode(ei, msg + sizeof(*hdr), c_struct, *len, 1);
+ if (msglen < 0) {
+ kfree(msg);
+ return ERR_PTR(msglen);
+ }
+ }
+
+ hdr = msg;
+ hdr->type = type;
+ hdr->txn_id = txn_id;
+ hdr->msg_id = msg_id;
+ hdr->msg_len = msglen;
+
+ *len = sizeof(*hdr) + msglen;
+
+ return msg;
+}
+EXPORT_SYMBOL(qmi_encode_message);
+
+/**
+ * qmi_decode_message() - Decode QMI encoded message to C structure
+ * @buf: Buffer with encoded message
+ * @len: Amount of data in @buf
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to decode into
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+int qmi_decode_message(const void *buf, size_t len,
+ struct qmi_elem_info *ei, void *c_struct)
+{
+ if (!ei)
+ return -EINVAL;
+
+ if (!c_struct || !buf || !len)
+ return -EINVAL;
+
+ return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header),
+ len - sizeof(struct qmi_header), 1);
+}
+EXPORT_SYMBOL(qmi_decode_message);
+
+/* Common header in all QMI responses */
+struct qmi_elem_info qmi_response_type_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, result),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, error),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
+
+MODULE_DESCRIPTION("QMI encoder/decoder helper");
+MODULE_LICENSE("GPL v2");
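
To make the qmi_encdec API above concrete, here is a minimal sketch of how a caller might describe and round-trip a message; it is not part of this commit. The struct example_req, its element-info table, the tlv_type 0x01, the message id 0x21 and the 16-byte size limit are made-up illustrative values, and QMI_REQUEST is assumed to be defined alongside QMI_RESPONSE in <linux/soc/qcom/qmi.h>; only qmi_encode_message(), qmi_decode_message(), struct qmi_elem_info and the QMI_UNSIGNED_1_BYTE/QMI_EOTI/NO_ARRAY markers are taken from the code shown here.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/soc/qcom/qmi.h>

/* Hypothetical message: one mandatory u8 carried in TLV type 0x01. */
struct example_req {
	u8 value;
};

static struct qmi_elem_info example_req_ei[] = {
	{
		.data_type  = QMI_UNSIGNED_1_BYTE,
		.elem_len   = 1,
		.elem_size  = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type   = 0x01,
		.offset     = offsetof(struct example_req, value),
	},
	{
		.data_type  = QMI_EOTI,
		.array_type = NO_ARRAY,
	},
};

static int example_roundtrip(void)
{
	struct example_req in = { .value = 42 };
	struct example_req out = {};
	size_t len = 16;	/* in: max payload size, out: total bytes used */
	void *msg;
	int ret;

	/* QMI_REQUEST and msg_id 0x21 are illustrative (assumed) values. */
	msg = qmi_encode_message(QMI_REQUEST, 0x21, &len, 1,
				 example_req_ei, &in);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* len now covers the qmi_header plus the encoded TLV payload. */
	ret = qmi_decode_message(msg, len, example_req_ei, &out);
	kfree(msg);

	return ret < 0 ? ret : 0;
}

The same element-info table drives both directions, which is why the encoder and decoder are bundled into one helper object (qmi_helpers.o) in the Makefile above.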
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
new file mode 100644
index 000000000000..877611d5c42b
--- /dev/null
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/qrtr.h>
+#include <linux/net.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/soc/qcom/qmi.h>
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq);
+
+/**
+ * qmi_recv_new_server() - handler of NEW_SERVER control message
+ * @qmi: qmi handle
+ * @service: service id of the new server
+ * @instance: instance id of the new server
+ * @node: node of the new server
+ * @port: port of the new server
+ *
+ * Calls the new_server callback to inform the client about a newly registered
+ * server matching the currently registered service lookup.
+ */
+static void qmi_recv_new_server(struct qmi_handle *qmi,
+ unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ int ret;
+
+ if (!ops->new_server)
+ return;
+
+ /* Ignore EOF marker */
+ if (!node && !port)
+ return;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return;
+
+ svc->service = service;
+ svc->version = instance & 0xff;
+ svc->instance = instance >> 8;
+ svc->node = node;
+ svc->port = port;
+
+ ret = ops->new_server(qmi, svc);
+ if (ret < 0)
+ kfree(svc);
+ else
+ list_add(&svc->list_node, &qmi->lookup_results);
+}
+
+/**
+ * qmi_recv_del_server() - handler of DEL_SERVER control message
+ * @qmi: qmi handle
+ * @node: node of the dying server, a value of -1 matches all nodes
+ * @port: port of the dying server, a value of -1 matches all ports
+ *
+ * Calls the del_server callback for each previously seen server, allowing the
+ * client to react to the disappearing server.
+ */
+static void qmi_recv_del_server(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ struct qmi_service *tmp;
+
+ list_for_each_entry_safe(svc, tmp, &qmi->lookup_results, list_node) {
+ if (node != -1 && svc->node != node)
+ continue;
+ if (port != -1 && svc->port != port)
+ continue;
+
+ if (ops->del_server)
+ ops->del_server(qmi, svc);
+
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+
+/**
+ * qmi_recv_bye() - handler of BYE control message
+ * @qmi: qmi handle
+ * @node: id of the dying node
+ *
+ * Signals the client that all previously registered services on this node are
+ * now gone and then calls the bye callback to allow the client to further
+ * clean up resources associated with this remote.
+ */
+static void qmi_recv_bye(struct qmi_handle *qmi,
+ unsigned int node)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ qmi_recv_del_server(qmi, node, -1);
+
+ if (ops->bye)
+ ops->bye(qmi, node);
+}
+
+/**
+ * qmi_recv_del_client() - handler of DEL_CLIENT control message
+ * @qmi: qmi handle
+ * @node: node of the dying client
+ * @port: port of the dying client
+ *
+ * Signals the client about a dying client, by calling the del_client callback.
+ */
+static void qmi_recv_del_client(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ if (ops->del_client)
+ ops->del_client(qmi, node, port);
+}
+
+static void qmi_recv_ctrl_pkt(struct qmi_handle *qmi,
+ const void *buf, size_t len)
+{
+ const struct qrtr_ctrl_pkt *pkt = buf;
+
+ if (len < sizeof(struct qrtr_ctrl_pkt)) {
+ pr_debug("ignoring short control packet\n");
+ return;
+ }
+
+ switch (le32_to_cpu(pkt->cmd)) {
+ case QRTR_TYPE_BYE:
+ qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node));
+ break;
+ case QRTR_TYPE_NEW_SERVER:
+ qmi_recv_new_server(qmi,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance),
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_SERVER:
+ qmi_recv_del_server(qmi,
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_CLIENT:
+ qmi_recv_del_client(qmi,
+ le32_to_cpu(pkt->client.node),
+ le32_to_cpu(pkt->client.port));
+ break;
+ }
+}
+
+static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to send lookup registration: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_lookup() - register a new lookup with the name service
+ * @qmi: qmi handle
+ * @service: service id of the request
+ * @instance: instance id of the request
+ * @version: version number of the request
+ *
+ * Registering a lookup query with the name server will cause the name server
+ * to send NEW_SERVER and DEL_SERVER control messages to this socket as
+ * matching services are registered.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->lookups);
+
+ qmi_send_new_lookup(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_lookup);
+
+static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+ pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
+ pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("send service registration failed: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_server() - register a service with the name service
+ * @qmi: qmi handle
+ * @service: type of the service
+ * @instance: instance of the service
+ * @version: version of the service
+ *
+ * Register a new service with the name service. This allows clients to find
+ * and start sending messages to the client associated with @qmi.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->services);
+
+ qmi_send_new_server(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_server);
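
The lookup and server registration calls above are meant to be driven from a handle set up with qmi_handle_init(), documented near the end of this file. Below is a minimal client-side sketch, not part of the commit: the service/version/instance numbers (15/1/0), the 64-byte receive buffer and the example_ names are invented, and the new_server callback signature follows how ops->new_server() is invoked in qmi_recv_new_server() above.

#include <linux/kernel.h>
#include <linux/soc/qcom/qmi.h>

static struct qmi_handle example_handle;

/* Called for every server that matches the lookup registered below. */
static int example_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
	pr_info("example: service %u v%u at node %u port %u\n",
		svc->service, svc->version, svc->node, svc->port);
	return 0;	/* keep svc on qmi->lookup_results */
}

static const struct qmi_ops example_ops = {
	.new_server = example_new_server,
};

static int example_client_setup(void)
{
	int ret;

	/* 64 bytes is an arbitrary receive-buffer size for this sketch. */
	ret = qmi_handle_init(&example_handle, 64, &example_ops, NULL);
	if (ret < 0)
		return ret;

	/* Ask the name service to report servers for service 15, v1, instance 0. */
	return qmi_add_lookup(&example_handle, 15, 1, 0);
}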
+
+/**
+ * qmi_txn_init() - allocate transaction id within the given QMI handle
+ * @qmi: QMI handle
+ * @txn: transaction context
+ * @ei: description of how to decode a matching response (optional)
+ * @c_struct: pointer to the object to decode the response into (optional)
+ *
+ * This allocates a transaction id within the QMI handle. If @ei and @c_struct
+ * are specified any responses to this transaction will be decoded as described
+ * by @ei into @c_struct.
+ *
+ * A client calling qmi_txn_init() must call either qmi_txn_wait() or
+ * qmi_txn_cancel() to free up the allocated resources.
+ *
+ * Return: Transaction id on success, negative errno on failure.
+ */
+int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
+ struct qmi_elem_info *ei, void *c_struct)
+{
+ int ret;
+
+ memset(txn, 0, sizeof(*txn));
+
+ mutex_init(&txn->lock);
+ init_completion(&txn->completion);
+ txn->qmi = qmi;
+ txn->ei = ei;
+ txn->dest = c_struct;
+
+ mutex_lock(&qmi->txn_lock);
+ ret = idr_alloc_cyclic(&qmi->txns, txn, 0, INT_MAX, GFP_KERNEL);
+ if (ret < 0)
+ pr_err("failed to allocate transaction id\n");
+
+ txn->id = ret;
+ mutex_unlock(&qmi->txn_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_txn_init);
+
+/**
+ * qmi_txn_wait() - wait for a response on a transaction
+ * @txn: transaction handle
+ * @timeout: timeout, in jiffies
+ *
+ * If the transaction is decoded by means of @ei and @c_struct, the return
+ * value will be the return value of qmi_decode_message(); otherwise it's up
+ * to the specified message handler to fill out the result.
+ *
+ * Return: the transaction response on success, negative errno on failure.
+ */
+int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
+{
+ struct qmi_handle *qmi = txn->qmi;
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&txn->completion,
+ timeout);
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return -ETIMEDOUT;
+ else
+ return txn->result;
+}
+EXPORT_SYMBOL(qmi_txn_wait);
+
+/**
+ * qmi_txn_cancel() - cancel an ongoing transaction
+ * @txn: transaction id
+ */
+void qmi_txn_cancel(struct qmi_txn *txn)
+{
+ struct qmi_handle *qmi = txn->qmi;
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+}
+EXPORT_SYMBOL(qmi_txn_cancel);
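
A hedged sketch of the intended transaction lifecycle, not part of the commit: allocate a transaction with qmi_txn_init(), send the request carrying txn.id, then block in qmi_txn_wait() or bail out with qmi_txn_cancel(). The request is sent with qmi_send_request(), which this commit adds further down in qmi_interface.c but which is not shown above, so its signature is assumed here; the message id 0x21, the 16-byte maximum encoded size and the 5-second timeout are made-up values, and the response layout simply wraps the qmi_response_type_v01 structure exported by qmi_encdec.c.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/soc/qcom/qmi.h>

/* Hypothetical response: just the common result TLV (type 0x02 here). */
struct example_resp {
	struct qmi_response_type_v01 resp;
};

static struct qmi_elem_info example_resp_ei[] = {
	{
		.data_type  = QMI_STRUCT,
		.elem_len   = 1,
		.elem_size  = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type   = 0x02,
		.offset     = offsetof(struct example_resp, resp),
		.ei_array   = qmi_response_type_v01_ei,
	},
	{
		.data_type  = QMI_EOTI,
		.array_type = NO_ARRAY,
	},
};

static int example_request(struct qmi_handle *qmi, struct sockaddr_qrtr *server)
{
	struct example_resp resp = {};
	struct qmi_txn txn;
	int ret;

	/* Responses matching txn.id will be decoded straight into &resp. */
	ret = qmi_txn_init(qmi, &txn, example_resp_ei, &resp);
	if (ret < 0)
		return ret;

	/* qmi_send_request() is added later in this file; signature assumed. */
	ret = qmi_send_request(qmi, server, &txn, 0x21, 16, NULL, NULL);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	ret = qmi_txn_wait(&txn, 5 * HZ);
	if (ret < 0)
		return ret;

	return resp.resp.result;
}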
+
+/**
+ * qmi_invoke_handler() - find and invoke a handler for a message
+ * @qmi: qmi handle
+ * @sq: sockaddr of the sender
+ * @txn: transaction object for the message
+ * @buf: buffer containing the message
+ * @len: length of @buf
+ *
+ * Find handler and invoke handler for the incoming message.
+ */
+static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *buf, size_t len)
+{
+ const struct qmi_msg_handler *handler;
+ const struct qmi_header *hdr = buf;
+ void *dest;
+ int ret;
+
+ if (!qmi->handlers)
+ return;
+
+ for (handler = qmi->handlers; handler->fn; handler++) {
+ if (handler->type == hdr->type &&
+ handler->msg_id == hdr->msg_id)
+ break;
+ }
+
+ if (!handler->fn)
+ return;
+
+ dest = kzalloc(handler->decoded_size, GFP_KERNEL);
+ if (!dest)
+ return;
+
+ ret = qmi_decode_message(buf, len, handler->ei, dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+ else
+ handler->fn(qmi, sq, txn, dest);
+
+ kfree(dest);
+}
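
For unsolicited messages, the handler table passed to qmi_handle_init() drives the dispatch above: qmi_invoke_handler() walks it until type and msg_id match, allocates decoded_size bytes, decodes via ei and calls fn. The sketch below shows what a hypothetical indication handler might look like; it is not part of the commit, QMI_INDICATION is assumed to exist alongside QMI_RESPONSE, the message layout, tlv_type and msg_id are invented, and the callback signature follows how handler->fn() is invoked above.

#include <linux/kernel.h>
#include <linux/soc/qcom/qmi.h>

struct example_ind {
	u8 value;
};

static struct qmi_elem_info example_ind_ei[] = {
	{
		.data_type  = QMI_UNSIGNED_1_BYTE,
		.elem_len   = 1,
		.elem_size  = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type   = 0x01,
		.offset     = offsetof(struct example_ind, value),
	},
	{
		.data_type  = QMI_EOTI,
		.array_type = NO_ARRAY,
	},
};

/* Invoked by qmi_invoke_handler() with the already-decoded payload. */
static void example_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
			   struct qmi_txn *txn, const void *decoded)
{
	const struct example_ind *ind = decoded;

	pr_info("example: indication value %u from %u:%u\n",
		ind->value, sq->sq_node, sq->sq_port);
}

static const struct qmi_msg_handler example_handlers[] = {
	{
		.type		= QMI_INDICATION,	/* assumed constant */
		.msg_id		= 0x22,			/* made-up id */
		.ei		= example_ind_ei,
		.decoded_size	= sizeof(struct example_ind),
		.fn		= example_ind_cb,
	},
	{ /* sentinel: a NULL .fn terminates the walk above */ }
};

Such a table would be passed as the handlers argument of qmi_handle_init().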
+
+/**
+ * qmi_handle_net_reset() - invoked to handle ENETRESET on a QMI handle
+ * @qmi: the QMI context
+ *
+ * As a result of registering a name service with the QRTR all open sockets are
+ * flagged with ENETRESET and this function will be called. The typical case is
+ * the initial boot, where this signals that the local node id has been
+ * configured and as such any bound sockets need to be rebound. So close the
+ * socket, inform the client and re-initialize the socket.
+ *
+ * For clients it's generally sufficient to react to the del_server callbacks,
+ * but server code is expected to treat the net_reset callback as a "bye" from
+ * all nodes.
+ *
+ * Finally the QMI handle will send out registration requests for any lookups
+ * and services.
+ */
+static void qmi_handle_net_reset(struct qmi_handle *qmi)
+{
+ struct sockaddr_qrtr sq;
+ struct qmi_service *svc;
+ struct socket *sock;
+
+ sock = qmi_sock_create(qmi, &sq);
+ if (IS_ERR(sock))
+ return;
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(qmi->sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ if (qmi->ops.net_reset)
+ qmi->ops.net_reset(qmi);
+
+ mutex_lock(&qmi->sock_lock);
+ qmi->sock = sock;
+ qmi->sq = sq;
+ mutex_unlock(&qmi->sock_lock);
+
+ list_for_each_entry(svc, &qmi->lookups, list_node)
+ qmi_send_new_lookup(qmi, svc);
+
+ list_for_each_entry(svc, &qmi->services, list_node)
+ qmi_send_new_server(qmi, svc);
+}
+
+static void qmi_handle_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ const void *buf, size_t len)
+{
+ const struct qmi_header *hdr;
+ struct qmi_txn tmp_txn;
+ struct qmi_txn *txn = NULL;
+ int ret;
+
+ if (len < sizeof(*hdr)) {
+ pr_err("ignoring short QMI packet\n");
+ return;
+ }
+
+ hdr = buf;
+
+ /* If this is a response, find the matching transaction handle */
+ if (hdr->type == QMI_RESPONSE) {
+ mutex_lock(&qmi->txn_lock);
+ txn = idr_find(&qmi->txns, hdr->txn_id);
+
+ /* Ignore unexpected responses */
+ if (!txn) {
+ mutex_unlock(&qmi->txn_lock);
+ return;
+ }
+
+ mutex_lock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (txn->dest && txn->ei) {
+ ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+
+ txn->result = ret;
+ complete(&txn->completion);
+ } else {
+ qmi_invoke_handler(qmi, sq, txn, buf, len);
+ }
+
+ mutex_unlock(&txn->lock);
+ } else {
+ /* Create a txn based on the txn_id of the incoming message */
+ memset(&tmp_txn, 0, sizeof(tmp_txn));
+ tmp_txn.id = hdr->txn_id;
+
+ qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len);
+ }
+}
+
+static void qmi_data_ready_work(struct work_struct *work)
+{
+ struct qmi_handle *qmi = container_of(work, struct qmi_handle, work);
+ struct qmi_ops *ops = &qmi->ops;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
+ struct kvec iv;
+ ssize_t msglen;
+
+ for (;;) {
+ iv.iov_base = qmi->recv_buf;
+ iv.iov_len = qmi->recv_buf_size;
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock)
+ msglen = kernel_recvmsg(qmi->sock, &msg, &iv, 1,
+ iv.iov_len, MSG_DONTWAIT);
+ else
+ msglen = -EPIPE;
+ mutex_unlock(&qmi->sock_lock);
+ if (msglen == -EAGAIN)
+ break;
+
+ if (msglen == -ENETRESET) {
+ qmi_handle_net_reset(qmi);
+
+ /* The old qmi->sock is gone, our work is done */
+ break;
+ }
+
+ if (msglen < 0) {
+ pr_err("qmi recvmsg failed: %zd\n", msglen);
+ break;
+ }
+
+ if (sq.sq_node == qmi->sq.sq_node &&
+ sq.sq_port == QRTR_PORT_CTRL) {
+ qmi_recv_ctrl_pkt(qmi, qmi->recv_buf, msglen);
+ } else if (ops->msg_handler) {
+ ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen);
+ } else {
+ qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen);
+ }
+ }
+}
+
+static void qmi_data_ready(struct sock *sk)
+{
+ struct qmi_handle *qmi = sk->sk_user_data;
+
+ /*
+ * This will be NULL if we receive data while being in
+ * qmi_handle_release()
+ */
+ if (!qmi)
+ return;
+
+ queue_work(qmi->wq, &qmi->work);
+}
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq)
+{
+ struct socket *sock;
+ int sl = sizeof(*sq);
+ int ret;
+
+ ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
+ PF_QIPCRTR, &sock);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = kernel_getsockname(sock, (struct sockaddr *)sq, &sl);
+ if (ret < 0) {
+ sock_release(sock);
+ return ERR_PTR(ret);
+ }
+
+ sock->sk->sk_user_data = qmi;
+ sock->sk->sk_data_ready = qmi_data_ready;
+ sock->sk->sk_error_report = qmi_data_ready;
+
+ return sock;
+}
+
+/**
+ * qmi_handle_init() - initialize a QMI client handle
+ * @qmi: QMI handle to initialize
+ * @recv_buf_size: maximum size of incoming message
+ * @ops: reference to callbacks for QRTR notifications
+ * @handlers: NULL-terminated list of QMI message handlers
+ *
+ * This initializes the QMI client handle to allow sending and receiving QMI
+ * messages. As messages are received the appropriate handler will be invoked.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
+ const struct qmi_ops *ops,
+ const struct qmi_msg_handler *handlers)
+{
+ int ret;
+
+ mutex_init(&qmi->txn_lock);
+ mutex_init(&qmi->sock_lock);
+
+ idr_init(&qmi->txns);
+
+ INIT_LIST_HEAD(&qmi->lookups);
+ INIT_LIST_HEAD(&qmi->lookup_results);
+ INIT_LIST_HEAD(&qmi->services);
+
+ INIT_WORK(&qmi->work, qmi_data_ready_work);
+
+ qmi->handlers = handlers;
+ if (ops)
+ qmi->ops = *ops;
+
+ if (recv_buf_size < sizeof(struct qrtr_ctrl_pkt))
+ recv_buf_size = sizeof(struct qrtr_ctrl_pkt);
+ else
+ recv_buf_size += sizeof(struct qmi_header);
+
+ qmi->recv_buf_size = recv_buf_size;
+ qmi->recv_buf = kzalloc(recv_buf_size, GFP_KERNEL);
+ if (!qmi->recv_buf)
+ return -ENOMEM;
+
+ qmi->wq = alloc_workqueue("qmi_msg_handler", WQ_UNBOUND, 1);
+ if (!qmi->wq) {
+ ret = -ENOMEM;
+ goto err_free_recv_buf;
+ }
+
+ qmi->sock = qmi_sock_create(qmi, &qmi->sq);
+ if (IS_ERR(qmi->sock)) {
+ pr_err("failed to create QMI socket\n");
+ ret = PTR_ERR(qmi->sock);
+ goto err_destroy_wq;
+ }
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(qmi->wq);
+err_free_recv_buf:
+ kfree(qmi->recv_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_handle_init);
+
+/**
+ * qmi_handle_release() - release the QMI client handle
+ * @qmi: QMI client handle
+ *
+ * This closes the underlying socket and stops any handling of QMI messages.
+ */
+void qmi_handle_release(struct qmi_handle *qmi)
+{
+ struct socket *sock = qmi->sock;
+ struct qmi_service *svc, *tmp;
+
+ sock->sk->sk_user_data = NULL;
+ cancel_work_sync(&qmi->work);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ destroy_workqueue(qmi->wq);
+
+ idr_destroy(&qmi->txns);
+
+ kfree(qmi->recv_buf);
+
+ /* Free registered lookup requests */
+ list_for_each_entry_safe(svc, tmp, &qmi->lookups, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+
+ /* Free registered service information */
+ list_for_each_entry_safe(svc, tmp, &qmi->services, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+EXPORT_SYMBOL(qmi_handle_release);
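(Illustrative sketch, not part of this patch: bringing a handle up and down with
qmi_handle_init()/qmi_handle_release(). test_svc_ops and test_handlers are the
hypothetical placeholders from the sketches above, and TEST_MAX_MSG_LEN is a made-up
upper bound on the largest expected message.)

static struct qmi_handle test_qmi;

static int test_start(void)
{
	/* recv_buf_size is grown to at least sizeof(struct qrtr_ctrl_pkt) */
	return qmi_handle_init(&test_qmi, TEST_MAX_MSG_LEN,
			       &test_svc_ops, test_handlers);
}

static void test_stop(void)
{
	/* Closes the socket, flushes the work and frees lookups/services */
	qmi_handle_release(&test_qmi);
}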
+
+/**
+ * qmi_send_message() - send a QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @type: type of message to send
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * This function encodes @c_struct using @ei into a message of type @type
+ * with @msg_id and the id of @txn, in a buffer of at most @len bytes, and
+ * sends it to @sq.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static ssize_t qmi_send_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq, struct qmi_txn *txn,
+ int type, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct)
+{
+ struct msghdr msghdr = {};
+ struct kvec iv;
+ void *msg;
+ int ret;
+
+ msg = qmi_encode_message(type,
+ msg_id, &len,
+ txn->id, ei,
+ c_struct);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ iv.iov_base = msg;
+ iv.iov_len = len;
+
+ if (sq) {
+ msghdr.msg_name = sq;
+ msghdr.msg_namelen = sizeof(*sq);
+ }
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
+ if (ret < 0)
+ pr_err("failed to send QMI message\n");
+ } else {
+ ret = -EPIPE;
+ }
+ mutex_unlock(&qmi->sock_lock);
+
+ kfree(msg);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * qmi_send_request() - send a request QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_request);
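(Illustrative sketch, not part of this patch: the usual request/response round trip
through qmi_send_request(). TEST_PING_REQ, TEST_REQ_MAX_LEN, the test_ping_req/resp
structs and their ei tables are hypothetical; qmi_txn_init() and qmi_txn_wait() are
assumed to be the transaction helpers defined earlier in this file.)

static int test_ping(struct qmi_handle *qmi, struct sockaddr_qrtr *sq)
{
	struct test_ping_resp resp = {};
	struct test_ping_req req = {};
	struct qmi_txn txn;
	int ret;

	/* Ask for the eventual QMI_RESPONSE to be decoded into @resp */
	ret = qmi_txn_init(qmi, &txn, test_ping_resp_ei, &resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, sq, &txn, TEST_PING_REQ,
			       TEST_REQ_MAX_LEN, test_ping_req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* Completed by qmi_handle_message() when the response arrives */
	return qmi_txn_wait(&txn, 5 * HZ);
}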
+
+/**
+ * qmi_send_response() - send a response QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_response);
+
+/**
+ * qmi_send_indication() - send an indication QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ int msg_id, size_t len, struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_txn txn;
+ ssize_t rval;
+ int ret;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei,
+ c_struct);
+
+ /* We don't care about future messages on this txn */
+ qmi_txn_cancel(&txn);
+
+ return rval;
+}
+EXPORT_SYMBOL(qmi_send_indication);
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index ce35ff748adf..0a43b2e8906f 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -267,3 +267,7 @@ static void qcom_rmtfs_mem_exit(void)
unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
}
module_exit(qcom_rmtfs_mem_exit);
+
+MODULE_AUTHOR("Linaro Ltd");
+MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index f51fb2ea7200..c22503cd1edf 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -126,6 +127,8 @@ struct smp2p_entry {
* @ipc_regmap: regmap for the outbound ipc
* @ipc_offset: offset within the regmap
* @ipc_bit: bit in regmap@offset to kick to signal remote processor
+ * @mbox_client: mailbox client handle
+ * @mbox_chan: apcs ipc mailbox channel handle
* @inbound: list of inbound entries
* @outbound: list of outbound entries
*/
@@ -146,6 +149,9 @@ struct qcom_smp2p {
int ipc_offset;
int ipc_bit;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
struct list_head inbound;
struct list_head outbound;
};
@@ -154,7 +160,13 @@ static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
/* Make sure any updated data is written before the kick */
wmb();
- regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+
+ if (smp2p->mbox_chan) {
+ mbox_send_message(smp2p->mbox_chan, NULL);
+ mbox_client_txdone(smp2p->mbox_chan, 0);
+ } else {
+ regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+ }
}
/**
@@ -453,10 +465,6 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, smp2p);
- ret = smp2p_parse_ipc(smp2p);
- if (ret)
- return ret;
-
key = "qcom,smem";
ret = of_property_read_u32_array(pdev->dev.of_node, key,
smp2p->smem_items, 2);
@@ -465,17 +473,13 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
key = "qcom,local-pid";
ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to read %s\n", key);
- return -EINVAL;
- }
+ if (ret)
+ goto report_read_failure;
key = "qcom,remote-pid";
ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to read %s\n", key);
- return -EINVAL;
- }
+ if (ret)
+ goto report_read_failure;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -483,9 +487,23 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
return irq;
}
+ smp2p->mbox_client.dev = &pdev->dev;
+ smp2p->mbox_client.knows_txdone = true;
+ smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
+ if (IS_ERR(smp2p->mbox_chan)) {
+ if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+ return PTR_ERR(smp2p->mbox_chan);
+
+ smp2p->mbox_chan = NULL;
+
+ ret = smp2p_parse_ipc(smp2p);
+ if (ret)
+ return ret;
+ }
+
ret = qcom_smp2p_alloc_outbound_item(smp2p);
if (ret < 0)
- return ret;
+ goto release_mbox;
for_each_available_child_of_node(pdev->dev.of_node, node) {
entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
@@ -540,7 +558,14 @@ unwind_interfaces:
smp2p->out->valid_entries = 0;
+release_mbox:
+ mbox_free_channel(smp2p->mbox_chan);
+
return ret;
+
+report_read_failure:
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
}
static int qcom_smp2p_remove(struct platform_device *pdev)
@@ -554,6 +579,8 @@ static int qcom_smp2p_remove(struct platform_device *pdev)
list_for_each_entry(entry, &smp2p->outbound, node)
qcom_smem_state_unregister(entry->state);
+ mbox_free_channel(smp2p->mbox_chan);
+
smp2p->out->valid_entries = 0;
return 0;
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 403bea9d546b..50214b620865 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -496,8 +496,10 @@ static int qcom_smsm_probe(struct platform_device *pdev)
if (!smsm->hosts)
return -ENOMEM;
- local_node = of_find_node_with_property(of_node_get(pdev->dev.of_node),
- "#qcom,smem-state-cells");
+ for_each_child_of_node(pdev->dev.of_node, local_node) {
+ if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
+ break;
+ }
if (!local_node) {
dev_err(&pdev->dev, "no state entry\n");
return -EINVAL;
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 8b25bd55e648..2186285fda92 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# SAMSUNG SoC drivers
#
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 4d7694a4e7a4..29f294baac6e 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 938f8ccfcb74..f56adbd9fb8b 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -1,13 +1,9 @@
-/*
- * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS - CPU PMU(Power Management Unit) support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS - CPU PMU(Power Management Unit) support
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 86b3f2f8966d..977e4daf5a0f 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Header for EXYNOS PMU Driver support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __EXYNOS_PMU_H
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index af2f54e14b83..275d348ed9c9 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -1,13 +1,9 @@
-/*
- * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS3250 - CPU PMU (Power Management Unit) support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS3250 - CPU PMU (Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
index 5dbfe4e31f4c..a7cdbf1aac0c 100644
--- a/drivers/soc/samsung/exynos4-pmu.c
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -1,13 +1,9 @@
-/*
- * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS4 - CPU PMU(Power Management Unit) support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS4 - CPU PMU(Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 8d94f0819f32..19b38e008145 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -1,13 +1,9 @@
-/*
- * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS5250 - CPU PMU (Power Management Unit) support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS5250 - CPU PMU (Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index 0a89fa79c678..b236d3b47b49 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -1,13 +1,9 @@
-/*
- * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS5420 - CPU PMU (Power Management Unit) support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS5420 - CPU PMU (Power Management Unit) support
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
index 7c4fec1f93b5..b6a436594a19 100644
--- a/drivers/soc/samsung/pm_domains.c
+++ b/drivers/soc/samsung/pm_domains.c
@@ -1,17 +1,13 @@
-/*
- * Exynos Generic power domain support.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Implementation of Exynos specific power domain control which is used in
- * conjunction with runtime-pm. Support for both device-tree and non-device-tree
- * based power domain support is included.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+// SPDX-License-Identifier: GPL-2.0
+//
+// Exynos Generic power domain support.
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// http://www.samsung.com
+//
+// Implementation of Exynos specific power domain control which is used in
+// conjunction with runtime-pm. Support for both device-tree and non-device-tree
+// based power domain support is included.
#include <linux/io.h>
#include <linux/err.h>
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 39225de9d7f1..77d6b5c03aae 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -225,7 +225,7 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
if (!knav_queue_is_busy(inst)) {
struct knav_range_info *range = inst->range;
- inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
+ inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
if (range->ops && range->ops->open_queue)
ret = range->ops->open_queue(range, inst, flags);
@@ -779,7 +779,7 @@ void *knav_pool_create(const char *name,
goto err;
}
- pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
+ pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
pool->kdev = kdev;
pool->dev = kdev->dev;
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
new file mode 100644
index 000000000000..687c8f3cd955
--- /dev/null
+++ b/drivers/soc/xilinx/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Xilinx SoC drivers"
+
+config XILINX_VCU
+ tristate "Xilinx VCU logicoreIP Init"
+ depends on HAS_IOMEM
+ help
+ Provides the driver to enable and disable the isolation between the
+ processing system and programmable logic part by using the logicoreIP
+ register set. This driver also configures the frequency based on the
+ clock information from the logicoreIP register set.
+
+ If you say yes here you get support for the logicoreIP.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xlnx_vcu.
+
+endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
new file mode 100644
index 000000000000..dee8fd51e303
--- /dev/null
+++ b/drivers/soc/xilinx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
new file mode 100644
index 000000000000..a840c0272135
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU Init
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Contact: Dhaval Shah <dshah@xilinx.com>
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* Address map for different registers implemented in the VCU LogiCORE IP. */
+#define VCU_ECODER_ENABLE 0x00
+#define VCU_DECODER_ENABLE 0x04
+#define VCU_MEMORY_DEPTH 0x08
+#define VCU_ENC_COLOR_DEPTH 0x0c
+#define VCU_ENC_VERTICAL_RANGE 0x10
+#define VCU_ENC_FRAME_SIZE_X 0x14
+#define VCU_ENC_FRAME_SIZE_Y 0x18
+#define VCU_ENC_COLOR_FORMAT 0x1c
+#define VCU_ENC_FPS 0x20
+#define VCU_MCU_CLK 0x24
+#define VCU_CORE_CLK 0x28
+#define VCU_PLL_BYPASS 0x2c
+#define VCU_ENC_CLK 0x30
+#define VCU_PLL_CLK 0x34
+#define VCU_ENC_VIDEO_STANDARD 0x38
+#define VCU_STATUS 0x3c
+#define VCU_AXI_ENC_CLK 0x40
+#define VCU_AXI_DEC_CLK 0x44
+#define VCU_AXI_MCU_CLK 0x48
+#define VCU_DEC_VIDEO_STANDARD 0x4c
+#define VCU_DEC_FRAME_SIZE_X 0x50
+#define VCU_DEC_FRAME_SIZE_Y 0x54
+#define VCU_DEC_FPS 0x58
+#define VCU_BUFFER_B_FRAME 0x5c
+#define VCU_WPP_EN 0x60
+#define VCU_PLL_CLK_DEC 0x64
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+/* vcu slcr registers, bitmask and shift */
+#define VCU_PLL_CTRL 0x24
+#define VCU_PLL_CTRL_RESET_MASK 0x01
+#define VCU_PLL_CTRL_RESET_SHIFT 0
+#define VCU_PLL_CTRL_BYPASS_MASK 0x01
+#define VCU_PLL_CTRL_BYPASS_SHIFT 3
+#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
+#define VCU_PLL_CTRL_FBDIV_SHIFT 8
+#define VCU_PLL_CTRL_POR_IN_MASK 0x01
+#define VCU_PLL_CTRL_POR_IN_SHIFT 1
+#define VCU_PLL_CTRL_PWR_POR_MASK 0x01
+#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
+#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
+#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
+#define VCU_PLL_CTRL_DEFAULT 0
+#define VCU_PLL_DIV2 2
+
+#define VCU_PLL_CFG 0x28
+#define VCU_PLL_CFG_RES_MASK 0x0f
+#define VCU_PLL_CFG_RES_SHIFT 0
+#define VCU_PLL_CFG_CP_MASK 0x0f
+#define VCU_PLL_CFG_CP_SHIFT 5
+#define VCU_PLL_CFG_LFHF_MASK 0x03
+#define VCU_PLL_CFG_LFHF_SHIFT 10
+#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
+#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
+#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
+#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
+#define VCU_ENC_CORE_CTRL 0x30
+#define VCU_ENC_MCU_CTRL 0x34
+#define VCU_DEC_CORE_CTRL 0x38
+#define VCU_DEC_MCU_CTRL 0x3c
+#define VCU_PLL_DIVISOR_MASK 0x3f
+#define VCU_PLL_DIVISOR_SHIFT 4
+#define VCU_SRCSEL_MASK 0x01
+#define VCU_SRCSEL_SHIFT 0
+#define VCU_SRCSEL_PLL 1
+
+#define VCU_PLL_STATUS 0x60
+#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
+
+#define MHZ 1000000
+#define FVCO_MIN (1500U * MHZ)
+#define FVCO_MAX (3000U * MHZ)
+#define DIVISOR_MIN 0
+#define DIVISOR_MAX 63
+#define FRAC 100
+#define LIMIT (10 * MHZ)
+
+/**
+ * struct xvcu_device - Xilinx VCU init device structure
+ * @dev: Platform device
+ * @pll_ref: pll ref clock source
+ * @aclk: axi clock source
+ * @logicore_reg_ba: logicore reg base address
+ * @vcu_slcr_ba: vcu_slcr Register base address
+ * @coreclk: core clock frequency
+ */
+struct xvcu_device {
+ struct device *dev;
+ struct clk *pll_ref;
+ struct clk *aclk;
+ void __iomem *logicore_reg_ba;
+ void __iomem *vcu_slcr_ba;
+ u32 coreclk;
+};
+
+/**
+ * struct xvcu_pll_cfg - Helper data
+ * @fbdiv: The integer portion of the feedback divider to the PLL
+ * @cp: PLL charge pump control
+ * @res: PLL loop filter resistor control
+ * @lfhf: PLL loop filter high frequency capacitor control
+ * @lock_dly: Lock circuit configuration settings for the lock window size
+ * @lock_cnt: Lock circuit counter setting
+ */
+struct xvcu_pll_cfg {
+ u32 fbdiv;
+ u32 cp;
+ u32 res;
+ u32 lfhf;
+ u32 lock_dly;
+ u32 lock_cnt;
+};
+
+static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
+ { 25, 3, 10, 3, 63, 1000 },
+ { 26, 3, 10, 3, 63, 1000 },
+ { 27, 4, 6, 3, 63, 1000 },
+ { 28, 4, 6, 3, 63, 1000 },
+ { 29, 4, 6, 3, 63, 1000 },
+ { 30, 4, 6, 3, 63, 1000 },
+ { 31, 6, 1, 3, 63, 1000 },
+ { 32, 6, 1, 3, 63, 1000 },
+ { 33, 4, 10, 3, 63, 1000 },
+ { 34, 5, 6, 3, 63, 1000 },
+ { 35, 5, 6, 3, 63, 1000 },
+ { 36, 5, 6, 3, 63, 1000 },
+ { 37, 5, 6, 3, 63, 1000 },
+ { 38, 5, 6, 3, 63, 975 },
+ { 39, 3, 12, 3, 63, 950 },
+ { 40, 3, 12, 3, 63, 925 },
+ { 41, 3, 12, 3, 63, 900 },
+ { 42, 3, 12, 3, 63, 875 },
+ { 43, 3, 12, 3, 63, 850 },
+ { 44, 3, 12, 3, 63, 850 },
+ { 45, 3, 12, 3, 63, 825 },
+ { 46, 3, 12, 3, 63, 800 },
+ { 47, 3, 12, 3, 63, 775 },
+ { 48, 3, 12, 3, 63, 775 },
+ { 49, 3, 12, 3, 63, 750 },
+ { 50, 3, 12, 3, 63, 750 },
+ { 51, 3, 2, 3, 63, 725 },
+ { 52, 3, 2, 3, 63, 700 },
+ { 53, 3, 2, 3, 63, 700 },
+ { 54, 3, 2, 3, 63, 675 },
+ { 55, 3, 2, 3, 63, 675 },
+ { 56, 3, 2, 3, 63, 650 },
+ { 57, 3, 2, 3, 63, 650 },
+ { 58, 3, 2, 3, 63, 625 },
+ { 59, 3, 2, 3, 63, 625 },
+ { 60, 3, 2, 3, 63, 625 },
+ { 61, 3, 2, 3, 63, 600 },
+ { 62, 3, 2, 3, 63, 600 },
+ { 63, 3, 2, 3, 63, 600 },
+ { 64, 3, 2, 3, 63, 600 },
+ { 65, 3, 2, 3, 63, 600 },
+ { 66, 3, 2, 3, 63, 600 },
+ { 67, 3, 2, 3, 63, 600 },
+ { 68, 3, 2, 3, 63, 600 },
+ { 69, 3, 2, 3, 63, 600 },
+ { 70, 3, 2, 3, 63, 600 },
+ { 71, 3, 2, 3, 63, 600 },
+ { 72, 3, 2, 3, 63, 600 },
+ { 73, 3, 2, 3, 63, 600 },
+ { 74, 3, 2, 3, 63, 600 },
+ { 75, 3, 2, 3, 63, 600 },
+ { 76, 3, 2, 3, 63, 600 },
+ { 77, 3, 2, 3, 63, 600 },
+ { 78, 3, 2, 3, 63, 600 },
+ { 79, 3, 2, 3, 63, 600 },
+ { 80, 3, 2, 3, 63, 600 },
+ { 81, 3, 2, 3, 63, 600 },
+ { 82, 3, 2, 3, 63, 600 },
+ { 83, 4, 2, 3, 63, 600 },
+ { 84, 4, 2, 3, 63, 600 },
+ { 85, 4, 2, 3, 63, 600 },
+ { 86, 4, 2, 3, 63, 600 },
+ { 87, 4, 2, 3, 63, 600 },
+ { 88, 4, 2, 3, 63, 600 },
+ { 89, 4, 2, 3, 63, 600 },
+ { 90, 4, 2, 3, 63, 600 },
+ { 91, 4, 2, 3, 63, 600 },
+ { 92, 4, 2, 3, 63, 600 },
+ { 93, 4, 2, 3, 63, 600 },
+ { 94, 4, 2, 3, 63, 600 },
+ { 95, 4, 2, 3, 63, 600 },
+ { 96, 4, 2, 3, 63, 600 },
+ { 97, 4, 2, 3, 63, 600 },
+ { 98, 4, 2, 3, 63, 600 },
+ { 99, 4, 2, 3, 63, 600 },
+ { 100, 4, 2, 3, 63, 600 },
+ { 101, 4, 2, 3, 63, 600 },
+ { 102, 4, 2, 3, 63, 600 },
+ { 103, 5, 2, 3, 63, 600 },
+ { 104, 5, 2, 3, 63, 600 },
+ { 105, 5, 2, 3, 63, 600 },
+ { 106, 5, 2, 3, 63, 600 },
+ { 107, 3, 4, 3, 63, 600 },
+ { 108, 3, 4, 3, 63, 600 },
+ { 109, 3, 4, 3, 63, 600 },
+ { 110, 3, 4, 3, 63, 600 },
+ { 111, 3, 4, 3, 63, 600 },
+ { 112, 3, 4, 3, 63, 600 },
+ { 113, 3, 4, 3, 63, 600 },
+ { 114, 3, 4, 3, 63, 600 },
+ { 115, 3, 4, 3, 63, 600 },
+ { 116, 3, 4, 3, 63, 600 },
+ { 117, 3, 4, 3, 63, 600 },
+ { 118, 3, 4, 3, 63, 600 },
+ { 119, 3, 4, 3, 63, 600 },
+ { 120, 3, 4, 3, 63, 600 },
+ { 121, 3, 4, 3, 63, 600 },
+ { 122, 3, 4, 3, 63, 600 },
+ { 123, 3, 4, 3, 63, 600 },
+ { 124, 3, 4, 3, 63, 600 },
+ { 125, 3, 4, 3, 63, 600 },
+};
+
+/**
+ * xvcu_read - Read from the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ *
+ * Return: the 32-bit value read from the specified VCU register
+ */
+static inline u32 xvcu_read(void __iomem *iomem, u32 offset)
+{
+ return ioread32(iomem + offset);
+}
+
+/**
+ * xvcu_write - Write to the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ * @value: Value to write
+ */
+static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
+{
+ iowrite32(value, iomem + offset);
+}
+
+/**
+ * xvcu_write_field_reg - Write to the vcu reg field
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ * @field: vcu reg field to write to
+ * @mask: vcu reg mask
+ * @shift: vcu reg number of bits to shift the bitfield
+ */
+static void xvcu_write_field_reg(void __iomem *iomem, int offset,
+ u32 field, u32 mask, int shift)
+{
+ u32 val = xvcu_read(iomem, offset);
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+
+ xvcu_write(iomem, offset, val);
+}
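(Illustrative sketch, not part of this patch: the helper above is a plain
read-modify-write of a single field, so updating e.g. the feedback divider looks like
the call below; new_fbdiv is a hypothetical value, and the mask and shift macros are
the VCU_PLL_CTRL_* definitions above.)

	xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL, new_fbdiv,
			     VCU_PLL_CTRL_FBDIV_MASK, VCU_PLL_CTRL_FBDIV_SHIFT);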
+
+/**
+ * xvcu_set_vcu_pll_info - Set the VCU PLL info
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Program the VCU PLL based on the user configuration
+ * (ref clock freq, core clock freq, mcu clock freq).
+ * The core clock frequency has higher priority than the mcu clock frequency.
+ * Errors are returned in the following cases:
+ * - the mcu or core clock read from the logicoreIP registers is 0
+ * - the VCU PLL CLKOUTDIV field is set to a value other than 1
+ * - no suitable PLL configuration is found for the requested frequencies
+ * - a si570_1 clock source related operation failed
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
+{
+ u32 refclk, coreclk, mcuclk, inte, deci;
+ u32 divisor_mcu, divisor_core, fvco;
+ u32 clkoutdiv, vcu_pll_ctrl, pll_clk;
+ u32 cfg_val, mod, ctrl;
+ int ret, i;
+ const struct xvcu_pll_cfg *found = NULL;
+
+ inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
+ deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
+ coreclk = xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
+ mcuclk = xvcu_read(xvcu->logicore_reg_ba, VCU_MCU_CLK) * MHZ;
+ if (!mcuclk || !coreclk) {
+ dev_err(xvcu->dev, "Invalid mcu and core clock data\n");
+ return -EINVAL;
+ }
+
+ refclk = (inte * MHZ) + (deci * (MHZ / FRAC));
+ dev_dbg(xvcu->dev, "Ref clock from logicoreIP is %uHz\n", refclk);
+ dev_dbg(xvcu->dev, "Core clock from logicoreIP is %uHz\n", coreclk);
+ dev_dbg(xvcu->dev, "Mcu clock from logicoreIP is %uHz\n", mcuclk);
+
+ clk_disable_unprepare(xvcu->pll_ref);
+ ret = clk_set_rate(xvcu->pll_ref, refclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate\n");
+
+ ret = clk_prepare_enable(xvcu->pll_ref);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable pll_ref clock source\n");
+ return ret;
+ }
+
+ refclk = clk_get_rate(xvcu->pll_ref);
+
+ /*
+ * The divide-by-2 should be always enabled (==1)
+ * to meet the timing in the design.
+ * Otherwise, it's an error
+ */
+ vcu_pll_ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_CTRL);
+ clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
+ clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
+ if (clkoutdiv != 1) {
+ dev_err(xvcu->dev, "clkoutdiv value is invalid\n");
+ return -EINVAL;
+ }
+
+ for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
+ const struct xvcu_pll_cfg *cfg = &xvcu_pll_cfg[i];
+
+ fvco = cfg->fbdiv * refclk;
+ if (fvco >= FVCO_MIN && fvco <= FVCO_MAX) {
+ pll_clk = fvco / VCU_PLL_DIV2;
+ if (fvco % VCU_PLL_DIV2 != 0)
+ pll_clk++;
+ mod = pll_clk % coreclk;
+ if (mod < LIMIT) {
+ divisor_core = pll_clk / coreclk;
+ } else if (coreclk - mod < LIMIT) {
+ divisor_core = pll_clk / coreclk;
+ divisor_core++;
+ } else {
+ continue;
+ }
+ if (divisor_core >= DIVISOR_MIN &&
+ divisor_core <= DIVISOR_MAX) {
+ found = cfg;
+ divisor_mcu = pll_clk / mcuclk;
+ mod = pll_clk % mcuclk;
+ if (mcuclk - mod < LIMIT)
+ divisor_mcu++;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ dev_err(xvcu->dev, "Invalid clock combination.\n");
+ return -EINVAL;
+ }
+
+ xvcu->coreclk = pll_clk / divisor_core;
+ mcuclk = pll_clk / divisor_mcu;
+ dev_dbg(xvcu->dev, "Actual Ref clock freq is %uHz\n", refclk);
+ dev_dbg(xvcu->dev, "Actual Core clock freq is %uHz\n", xvcu->coreclk);
+ dev_dbg(xvcu->dev, "Actual Mcu clock freq is %uHz\n", mcuclk);
+
+ vcu_pll_ctrl &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
+ vcu_pll_ctrl |= (found->fbdiv & VCU_PLL_CTRL_FBDIV_MASK) <<
+ VCU_PLL_CTRL_FBDIV_SHIFT;
+ vcu_pll_ctrl &= ~(VCU_PLL_CTRL_POR_IN_MASK <<
+ VCU_PLL_CTRL_POR_IN_SHIFT);
+ vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_POR_IN_MASK) <<
+ VCU_PLL_CTRL_POR_IN_SHIFT;
+ vcu_pll_ctrl &= ~(VCU_PLL_CTRL_PWR_POR_MASK <<
+ VCU_PLL_CTRL_PWR_POR_SHIFT);
+ vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_PWR_POR_MASK) <<
+ VCU_PLL_CTRL_PWR_POR_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ /* Set divisor for the core and mcu clock */
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
+ VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL, ctrl);
+
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
+ VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL, ctrl);
+
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL, ctrl);
+
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL, ctrl);
+
+ /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
+ cfg_val = (found->res << VCU_PLL_CFG_RES_SHIFT) |
+ (found->cp << VCU_PLL_CFG_CP_SHIFT) |
+ (found->lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
+ (found->lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
+ (found->lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CFG, cfg_val);
+
+ return 0;
+}
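(Illustrative sketch, not part of this patch: one worked instance of the search above,
assuming a hypothetical 33.333 MHz reference clock and a requested 500 MHz core clock.)

	/*
	 * Walking xvcu_pll_cfg[] from the largest fbdiv down, the first entry
	 * whose VCO lands in [FVCO_MIN, FVCO_MAX] is fbdiv = 90:
	 *
	 *   fvco    = 90 * 33333000       = 2999970000 Hz
	 *   pll_clk = fvco / VCU_PLL_DIV2 = 1499985000 Hz
	 *
	 * For coreclk = 500000000: pll_clk % coreclk = 499985000 and
	 * coreclk - mod = 15000 < LIMIT, so divisor_core = 2 + 1 = 3,
	 * i.e. an actual core clock of about 499.995 MHz.
	 */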
+
+/**
+ * xvcu_set_pll - PLL init sequence
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Set the PLL configuration and then run the PLL reset sequence,
+ * waiting for the PLL to lock so that the clock output is stable.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xvcu_set_pll(struct xvcu_device *xvcu)
+{
+ u32 lock_status;
+ unsigned long timeout;
+ int ret;
+
+ ret = xvcu_set_vcu_pll_info(xvcu);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to set pll info\n");
+ return ret;
+ }
+
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 1, VCU_PLL_CTRL_BYPASS_MASK,
+ VCU_PLL_CTRL_BYPASS_SHIFT);
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 1, VCU_PLL_CTRL_RESET_MASK,
+ VCU_PLL_CTRL_RESET_SHIFT);
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 0, VCU_PLL_CTRL_RESET_MASK,
+ VCU_PLL_CTRL_RESET_SHIFT);
+ /*
+	 * Define the timeout as the maximum time to wait for
+	 * PLL_STATUS to report lock.
+ */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ lock_status = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_STATUS);
+ if (lock_status & VCU_PLL_STATUS_LOCK_STATUS_MASK) {
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 0, VCU_PLL_CTRL_BYPASS_MASK,
+ VCU_PLL_CTRL_BYPASS_SHIFT);
+ return 0;
+ }
+ } while (!time_after(jiffies, timeout));
+
+	/* PLL is not locked even after the 2 second timeout */
+ dev_err(xvcu->dev, "PLL is not locked\n");
+ return -ETIMEDOUT;
+}
+
+/**
+ * xvcu_probe - Probe existence of the logicoreIP
+ * and initialize PLL
+ *
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Returns 0 on success
+ * Negative error code otherwise
+ */
+static int xvcu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xvcu_device *xvcu;
+ int ret;
+
+ xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
+ if (!xvcu)
+ return -ENOMEM;
+
+ xvcu->dev = &pdev->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
+ if (!res) {
+ dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->vcu_slcr_ba) {
+ dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->logicore_reg_ba) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(xvcu->aclk)) {
+ dev_err(&pdev->dev, "Could not get aclk clock\n");
+ return PTR_ERR(xvcu->aclk);
+ }
+
+ xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
+ if (IS_ERR(xvcu->pll_ref)) {
+ dev_err(&pdev->dev, "Could not get pll_ref clock\n");
+ return PTR_ERR(xvcu->pll_ref);
+ }
+
+ ret = clk_prepare_enable(xvcu->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "aclk clock enable failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xvcu->pll_ref);
+ if (ret) {
+ dev_err(&pdev->dev, "pll_ref clock enable failed\n");
+ goto error_aclk;
+ }
+
+ /*
+ * Do the Gasket isolation and put the VCU out of reset
+ * Bit 0 : Gasket isolation
+ * Bit 1 : put VCU out of reset
+ */
+ xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+
+ /* Do the PLL Settings based on the ref clk,core and mcu clk freq */
+ ret = xvcu_set_pll(xvcu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the pll\n");
+ goto error_pll_ref;
+ }
+
+ dev_set_drvdata(&pdev->dev, xvcu);
+
+ dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
+
+ return 0;
+
+error_pll_ref:
+ clk_disable_unprepare(xvcu->pll_ref);
+error_aclk:
+ clk_disable_unprepare(xvcu->aclk);
+ return ret;
+}
+
+/**
+ * xvcu_remove - Insert gasket isolation
+ * and disable the clock
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Returns 0 on success
+ * Negative error code otherwise
+ */
+static int xvcu_remove(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+
+ xvcu = platform_get_drvdata(pdev);
+ if (!xvcu)
+ return -ENODEV;
+
+	/* Add the Gasket isolation and put the VCU in reset. */
+ xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+
+ clk_disable_unprepare(xvcu->pll_ref);
+ clk_disable_unprepare(xvcu->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xvcu_of_id_table[] = {
+ { .compatible = "xlnx,vcu" },
+ { .compatible = "xlnx,vcu-logicoreip-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
+
+static struct platform_driver xvcu_driver = {
+ .driver = {
+ .name = "xilinx-vcu",
+ .of_match_table = xvcu_of_id_table,
+ },
+ .probe = xvcu_probe,
+ .remove = xvcu_remove,
+};
+
+module_platform_driver(xvcu_driver);
+
+MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU init Driver");
+MODULE_LICENSE("GPL v2");