Diffstat (limited to 'drivers/remoteproc/qcom_q6v5_mss.c')
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c | 1676
1 file changed, 1428 insertions, 248 deletions
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 01be7314e176..91940977ca89 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1,45 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Qualcomm self-authenticating modem subsystem remoteproc driver * * Copyright (C) 2016 Linaro Ltd. * Copyright (C) 2014 Sony Mobile Communications AB * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/devcoredump.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/of_address.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/of_reserved_mem.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/remoteproc.h> #include <linux/reset.h> #include <linux/soc/qcom/mdt_loader.h> #include <linux/iopoll.h> +#include <linux/slab.h> #include "remoteproc_internal.h" #include "qcom_common.h" +#include "qcom_pil_info.h" #include "qcom_q6v5.h" -#include <linux/qcom_scm.h> +#include <linux/firmware/qcom/qcom_scm.h> #define MPSS_CRASH_REASON_SMEM 421 +#define MBA_LOG_SIZE SZ_4K + +#define MPSS_PAS_ID 5 + /* RMB Status Register Values */ #define RMB_PBL_SUCCESS 0x1 @@ -67,20 +69,36 @@ #define QDSP6SS_GFMUX_CTL_REG 0x020 #define QDSP6SS_PWR_CTL_REG 0x030 #define QDSP6SS_MEM_PWR_CTL 0x0B0 +#define QDSP6V6SS_MEM_PWR_CTL 0x034 #define QDSP6SS_STRAP_ACC 0x110 +#define QDSP6V62SS_BHS_STATUS 0x0C4 /* AXI Halt Register Offsets */ #define AXI_HALTREQ_REG 0x0 #define AXI_HALTACK_REG 0x4 #define AXI_IDLE_REG 0x8 +#define AXI_GATING_VALID_OVERRIDE BIT(0) + +#define HALT_ACK_TIMEOUT_US 100000 -#define HALT_ACK_TIMEOUT_MS 100 +/* QACCEPT Register Offsets */ +#define QACCEPT_ACCEPT_REG 0x0 +#define QACCEPT_ACTIVE_REG 0x4 +#define QACCEPT_DENY_REG 0x8 +#define QACCEPT_REQ_REG 0xC + +#define QACCEPT_TIMEOUT_US 50 /* QDSP6SS_RESET */ #define Q6SS_STOP_CORE BIT(0) #define Q6SS_CORE_ARES BIT(1) #define Q6SS_BUS_ARES_ENABLE BIT(2) +/* QDSP6SS CBCR */ +#define Q6SS_CBCR_CLKEN BIT(0) +#define Q6SS_CBCR_CLKOFF BIT(31) +#define Q6SS_CBCR_TIMEOUT_US 200 + /* QDSP6SS_GFMUX_CTL */ #define Q6SS_CLK_ENABLE BIT(1) @@ -96,21 +114,30 @@ #define QDSS_BHS_ON BIT(21) #define QDSS_LDO_BYP BIT(22) +/* QDSP6v55 parameters */ +#define QDSP6V55_MEM_BITS GENMASK(16, 8) + /* QDSP6v56 parameters */ #define QDSP6v56_LDO_BYP BIT(25) #define QDSP6v56_BHS_ON BIT(24) #define QDSP6v56_CLAMP_WL BIT(21) #define QDSP6v56_CLAMP_QMC_MEM BIT(22) -#define HALT_CHECK_MAX_LOOPS 200 #define QDSP6SS_XO_CBCR 0x0038 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20 +#define QDSP6v55_BHS_EN_REST_ACK BIT(0) /* QDSP6v65 parameters */ +#define QDSP6SS_CORE_CBCR 0x20 #define QDSP6SS_SLEEP 0x3C #define QDSP6SS_BOOT_CORE_START 0x400 #define QDSP6SS_BOOT_CMD 0x404 -#define SLEEP_CHECK_MAX_LOOPS 200 #define 
BOOT_FSM_TIMEOUT 10000 +#define BHS_CHECK_MAX_LOOPS 200 + +/* External power block headswitch */ +#define EXTERNAL_BHS_ON BIT(0) +#define EXTERNAL_BHS_STATUS BIT(4) +#define EXTERNAL_BHS_TIMEOUT_US 50 struct reg_info { struct regulator *reg; @@ -127,13 +154,21 @@ struct qcom_mss_reg_res { struct rproc_hexagon_res { const char *hexagon_mba_image; struct qcom_mss_reg_res *proxy_supply; + struct qcom_mss_reg_res *fallback_proxy_supply; struct qcom_mss_reg_res *active_supply; char **proxy_clk_names; char **reset_clk_names; char **active_clk_names; + char **proxy_pd_names; int version; bool need_mem_protection; bool has_alt_reset; + bool has_mba_logs; + bool has_spare_reg; + bool has_qaccept_regs; + bool has_ext_bhs_reg; + bool has_ext_cntl_regs; + bool has_vq6; }; struct q6v5 { @@ -144,9 +179,23 @@ struct q6v5 { void __iomem *rmb_base; struct regmap *halt_map; + struct regmap *conn_map; + u32 halt_q6; u32 halt_modem; u32 halt_nc; + u32 halt_vq6; + u32 conn_box; + u32 ext_bhs; + + u32 qaccept_mdm; + u32 qaccept_cx; + u32 qaccept_axi; + + u32 axim1_clk_off; + u32 crypto_clk_off; + u32 force_clk_on; + u32 rscc_disable; struct reset_control *mss_restart; struct reset_control *pdc_reset; @@ -156,52 +205,72 @@ struct q6v5 { struct clk *active_clks[8]; struct clk *reset_clks[4]; struct clk *proxy_clks[4]; + struct device *proxy_pds[3]; int active_clk_count; int reset_clk_count; int proxy_clk_count; + int proxy_pd_count; struct reg_info active_regs[1]; - struct reg_info proxy_regs[3]; + struct reg_info proxy_regs[1]; + struct reg_info fallback_proxy_regs[2]; int active_reg_count; int proxy_reg_count; - - bool running; + int fallback_proxy_reg_count; bool dump_mba_loaded; - unsigned long dump_segment_mask; - unsigned long dump_complete_mask; + size_t current_dump_size; + size_t total_dump_size; phys_addr_t mba_phys; - void *mba_region; size_t mba_size; + size_t dp_size; + + phys_addr_t mdata_phys; + size_t mdata_size; phys_addr_t mpss_phys; phys_addr_t mpss_reloc; - void *mpss_region; size_t mpss_size; struct qcom_rproc_glink glink_subdev; struct qcom_rproc_subdev smd_subdev; + struct qcom_rproc_pdm pdm_subdev; struct qcom_rproc_ssr ssr_subdev; struct qcom_sysmon *sysmon; + struct platform_device *bam_dmux; bool need_mem_protection; bool has_alt_reset; - int mpss_perm; - int mba_perm; + bool has_mba_logs; + bool has_spare_reg; + bool has_qaccept_regs; + bool has_ext_bhs_reg; + bool has_ext_cntl_regs; + bool has_vq6; + u64 mpss_perm; + u64 mba_perm; + const char *hexagon_mdt_image; int version; }; enum { + MSS_MSM8226, + MSS_MSM8909, MSS_MSM8916, + MSS_MSM8926, + MSS_MSM8953, MSS_MSM8974, MSS_MSM8996, + MSS_MSM8998, + MSS_SC7180, + MSS_SC7280, + MSS_SDM660, MSS_SDM845, }; static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, const struct qcom_mss_reg_res *reg_res) { - int rc; int i; if (!reg_res) @@ -209,13 +278,10 @@ static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, for (i = 0; reg_res[i].supply; i++) { regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); - if (IS_ERR(regs[i].reg)) { - rc = PTR_ERR(regs[i].reg); - if (rc != -EPROBE_DEFER) - dev_err(dev, "Failed to get %s\n regulator", - reg_res[i].supply); - return rc; - } + if (IS_ERR(regs[i].reg)) + return dev_err_probe(dev, PTR_ERR(regs[i].reg), + "Failed to get %s\n regulator", + reg_res[i].supply); regs[i].uV = reg_res[i].uV; regs[i].uA = reg_res[i].uA; @@ -321,31 +387,147 @@ static void q6v5_clk_disable(struct device *dev, clk_disable_unprepare(clks[i]); } -static int q6v5_xfer_mem_ownership(struct 
q6v5 *qproc, int *current_perm, - bool remote_owner, phys_addr_t addr, +static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, + size_t pd_count) +{ + int ret; + int i; + + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], INT_MAX); + ret = pm_runtime_get_sync(pds[i]); + if (ret < 0) { + pm_runtime_put_noidle(pds[i]); + dev_pm_genpd_set_performance_state(pds[i], 0); + goto unroll_pd_votes; + } + } + + return 0; + +unroll_pd_votes: + for (i--; i >= 0; i--) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } + + return ret; +} + +static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, + size_t pd_count) +{ + int i; + + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } +} + +static int q6v5_external_bhs_enable(struct q6v5 *qproc) +{ + u32 val; + int ret = 0; + + /* + * Enable external power block headswitch and wait for it to + * stabilize + */ + regmap_set_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON); + + ret = regmap_read_poll_timeout(qproc->conn_map, qproc->ext_bhs, + val, val & EXTERNAL_BHS_STATUS, + 1, EXTERNAL_BHS_TIMEOUT_US); + + if (ret) { + dev_err(qproc->dev, "External BHS timed out\n"); + ret = -ETIMEDOUT; + } + + return ret; +} + +static void q6v5_external_bhs_disable(struct q6v5 *qproc) +{ + regmap_clear_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON); +} + +static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm, + bool local, bool remote, phys_addr_t addr, size_t size) { - struct qcom_scm_vmperm next; + struct qcom_scm_vmperm next[2]; + int perms = 0; if (!qproc->need_mem_protection) return 0; - if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA)) - return 0; - if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS)) + + if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && + remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) return 0; - next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS; - next.perm = remote_owner ? 
QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX; + if (local) { + next[perms].vmid = QCOM_SCM_VMID_HLOS; + next[perms].perm = QCOM_SCM_PERM_RWX; + perms++; + } + + if (remote) { + next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; + next[perms].perm = QCOM_SCM_PERM_RW; + perms++; + } return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), - current_perm, &next, 1); + current_perm, next, perms); } +static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region) +{ + const struct firmware *dp_fw; + + if (request_firmware_direct(&dp_fw, "msadp", qproc->dev)) + return; + + if (SZ_1M + dp_fw->size <= qproc->mba_size) { + memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size); + qproc->dp_size = dp_fw->size; + } + + release_firmware(dp_fw); +} + +#define MSM8974_B00_OFFSET 0x1000 + static int q6v5_load(struct rproc *rproc, const struct firmware *fw) { struct q6v5 *qproc = rproc->priv; + void *mba_region; - memcpy(qproc->mba_region, fw->data, fw->size); + /* MBA is restricted to a maximum size of 1M */ + if (fw->size > qproc->mba_size || fw->size > SZ_1M) { + dev_err(qproc->dev, "MBA firmware load failed\n"); + return -EINVAL; + } + + mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC); + if (!mba_region) { + dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", + &qproc->mba_phys, qproc->mba_size); + return -EBUSY; + } + + if ((qproc->version == MSS_MSM8974 || + qproc->version == MSS_MSM8226 || + qproc->version == MSS_MSM8926) && + fw->size > MSM8974_B00_OFFSET && + !memcmp(fw->data, ELFMAG, SELFMAG)) + memcpy(mba_region, fw->data + MSM8974_B00_OFFSET, fw->size - MSM8974_B00_OFFSET); + else + memcpy(mba_region, fw->data, fw->size); + q6v5_debug_policy_load(qproc, mba_region); + memunmap(mba_region); return 0; } @@ -358,6 +540,30 @@ static int q6v5_reset_assert(struct q6v5 *qproc) reset_control_assert(qproc->pdc_reset); ret = reset_control_reset(qproc->mss_restart); reset_control_deassert(qproc->pdc_reset); + } else if (qproc->has_spare_reg) { + /* + * When the AXI pipeline is being reset with the Q6 modem partly + * operational there is possibility of AXI valid signal to + * glitch, leading to spurious transactions and Q6 hangs. A work + * around is employed by asserting the AXI_GATING_VALID_OVERRIDE + * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE + * is withdrawn post MSS assert followed by a MSS deassert, + * while holding the PDC reset. 
+ */ + reset_control_assert(qproc->pdc_reset); + regmap_update_bits(qproc->conn_map, qproc->conn_box, + AXI_GATING_VALID_OVERRIDE, 1); + reset_control_assert(qproc->mss_restart); + reset_control_deassert(qproc->pdc_reset); + regmap_update_bits(qproc->conn_map, qproc->conn_box, + AXI_GATING_VALID_OVERRIDE, 0); + ret = reset_control_deassert(qproc->mss_restart); + } else if (qproc->has_ext_cntl_regs) { + regmap_write(qproc->conn_map, qproc->rscc_disable, 0); + reset_control_assert(qproc->pdc_reset); + reset_control_assert(qproc->mss_restart); + reset_control_deassert(qproc->pdc_reset); + ret = reset_control_deassert(qproc->mss_restart); } else { ret = reset_control_assert(qproc->mss_restart); } @@ -375,6 +581,8 @@ static int q6v5_reset_deassert(struct q6v5 *qproc) ret = reset_control_reset(qproc->mss_restart); writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); reset_control_deassert(qproc->pdc_reset); + } else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) { + ret = reset_control_reset(qproc->mss_restart); } else { ret = reset_control_deassert(qproc->mss_restart); } @@ -428,6 +636,31 @@ static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) return val; } +static void q6v5_dump_mba_logs(struct q6v5 *qproc) +{ + struct rproc *rproc = qproc->rproc; + void *data; + void *mba_region; + + if (!qproc->has_mba_logs) + return; + + if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, + qproc->mba_size)) + return; + + mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC); + if (!mba_region) + return; + + data = vmalloc(MBA_LOG_SIZE); + if (data) { + memcpy(data, mba_region, MBA_LOG_SIZE); + dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL); + } + memunmap(mba_region); +} + static int q6v5proc_reset(struct q6v5 *qproc) { u32 val; @@ -436,12 +669,12 @@ static int q6v5proc_reset(struct q6v5 *qproc) if (qproc->version == MSS_SDM845) { val = readl(qproc->reg_base + QDSP6SS_SLEEP); - val |= 0x1; + val |= Q6SS_CBCR_CLKEN; writel(val, qproc->reg_base + QDSP6SS_SLEEP); ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, - val, !(val & BIT(31)), 1, - SLEEP_CHECK_MAX_LOOPS); + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); if (ret) { dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); return -ETIMEDOUT; @@ -462,10 +695,67 @@ static int q6v5proc_reset(struct q6v5 *qproc) } goto pbl_wait; - } else if (qproc->version == MSS_MSM8996) { - /* Override the ACC value if required */ - writel(QDSP6SS_ACC_OVERRIDE_VAL, - qproc->reg_base + QDSP6SS_STRAP_ACC); + } else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) { + val = readl(qproc->reg_base + QDSP6SS_SLEEP); + val |= Q6SS_CBCR_CLKEN; + writel(val, qproc->reg_base + QDSP6SS_SLEEP); + + ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); + return -ETIMEDOUT; + } + + /* Turn on the XO clock needed for PLL setup */ + val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); + val |= Q6SS_CBCR_CLKEN; + writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); + + ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, "QDSP6SS XO clock timed out\n"); + return -ETIMEDOUT; + } + + /* Configure Q6 core CBCR to auto-enable after reset sequence */ + val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR); + val |= Q6SS_CBCR_CLKEN; + writel(val, 
qproc->reg_base + QDSP6SS_CORE_CBCR); + + /* De-assert the Q6 stop core signal */ + writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); + + /* Wait for 10 us for any staggering logic to settle */ + usleep_range(10, 20); + + /* Trigger the boot FSM to start the Q6 out-of-reset sequence */ + writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); + + /* Poll the MSS_STATUS for FSM completion */ + ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, + val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); + if (ret) { + dev_err(qproc->dev, "Boot FSM failed to complete.\n"); + /* Reset the modem so that boot FSM is in reset state */ + q6v5_reset_deassert(qproc); + return ret; + } + goto pbl_wait; + } else if (qproc->version == MSS_MSM8909 || + qproc->version == MSS_MSM8953 || + qproc->version == MSS_MSM8996 || + qproc->version == MSS_MSM8998 || + qproc->version == MSS_SDM660) { + + if (qproc->version != MSS_MSM8909 && + qproc->version != MSS_MSM8953) + /* Override the ACC value if required */ + writel(QDSP6SS_ACC_OVERRIDE_VAL, + qproc->reg_base + QDSP6SS_STRAP_ACC); /* Assert resets, stop core */ val = readl(qproc->reg_base + QDSP6SS_RESET_REG); @@ -474,13 +764,13 @@ static int q6v5proc_reset(struct q6v5 *qproc) /* BHS require xo cbcr to be enabled */ val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); - val |= 0x1; + val |= Q6SS_CBCR_CLKEN; writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); /* Read CLKOFF bit to go low indicating CLK is enabled */ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, - val, !(val & BIT(31)), 1, - HALT_CHECK_MAX_LOOPS); + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); if (ret) { dev_err(qproc->dev, "xo cbcr enabling timed out (rc:%d)\n", ret); @@ -493,33 +783,68 @@ static int q6v5proc_reset(struct q6v5 *qproc) val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); udelay(1); + if (qproc->version == MSS_SDM660) { + ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS, + i, (i & QDSP6v55_BHS_EN_REST_ACK), + 1, BHS_CHECK_MAX_LOOPS); + if (ret == -ETIMEDOUT) { + dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n"); + return -ETIMEDOUT; + } + } + /* Put LDO in bypass mode */ val |= QDSP6v56_LDO_BYP; writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); - /* Deassert QDSP6 compiler memory clamp */ - val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); - val &= ~QDSP6v56_CLAMP_QMC_MEM; - writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); - - /* Deassert memory peripheral sleep and L2 memory standby */ - val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; - writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); - - /* Turn on L1, L2, ETB and JU memories 1 at a time */ - val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL); - for (i = 19; i >= 0; i--) { - val |= BIT(i); - writel(val, qproc->reg_base + - QDSP6SS_MEM_PWR_CTL); - /* - * Read back value to ensure the write is done then - * wait for 1us for both memory peripheral and data - * array to turn on. 
- */ - val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL); - udelay(1); + if (qproc->version != MSS_MSM8909) { + int mem_pwr_ctl; + + /* Deassert QDSP6 compiler memory clamp */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val &= ~QDSP6v56_CLAMP_QMC_MEM; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Deassert memory peripheral sleep and L2 memory standby */ + val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Turn on L1, L2, ETB and JU memories 1 at a time */ + if (qproc->version == MSS_MSM8953 || + qproc->version == MSS_MSM8996) { + mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL; + i = 19; + } else { + /* MSS_MSM8998, MSS_SDM660 */ + mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL; + i = 28; + } + val = readl(qproc->reg_base + mem_pwr_ctl); + for (; i >= 0; i--) { + val |= BIT(i); + writel(val, qproc->reg_base + mem_pwr_ctl); + /* + * Read back value to ensure the write is done then + * wait for 1us for both memory peripheral and data + * array to turn on. + */ + val |= readl(qproc->reg_base + mem_pwr_ctl); + udelay(1); + } + } else { + /* Turn on memories */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_SLP_RET_N | Q6SS_L2DATA_STBY_N | + Q6SS_ETB_SLP_NRET_N | QDSP6V55_MEM_BITS; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Turn on L2 banks 1 at a time */ + for (i = 0; i <= 7; i++) { + val |= BIT(i); + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + } } + /* Remove word line clamp */ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); val &= ~QDSP6v56_CLAMP_WL; @@ -585,11 +910,93 @@ pbl_wait: return ret; } +static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset) +{ + unsigned int val; + int ret; + + if (!qproc->has_qaccept_regs) + return 0; + + if (qproc->has_ext_cntl_regs) { + regmap_write(qproc->conn_map, qproc->rscc_disable, 0); + regmap_write(qproc->conn_map, qproc->force_clk_on, 1); + + ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val, + !val, 1, Q6SS_CBCR_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, "failed to enable axim1 clock\n"); + return -ETIMEDOUT; + } + } + + regmap_write(map, offset + QACCEPT_REQ_REG, 1); + + /* Wait for accept */ + ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5, + QACCEPT_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, "qchannel enable failed\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset) +{ + int ret; + unsigned int val, retry; + unsigned int nretry = 10; + bool takedown_complete = false; + + if (!qproc->has_qaccept_regs) + return; + + while (!takedown_complete && nretry) { + nretry--; + + /* Wait for active transactions to complete */ + regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5, + QACCEPT_TIMEOUT_US); + + /* Request Q-channel transaction takedown */ + regmap_write(map, offset + QACCEPT_REQ_REG, 0); + + /* + * If the request is denied, reset the Q-channel takedown request, + * wait for active transactions to complete and retry takedown. 
+ */ + retry = 10; + while (retry) { + usleep_range(5, 10); + retry--; + ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val); + if (!ret && val) { + regmap_write(map, offset + QACCEPT_REQ_REG, 1); + break; + } + + ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val); + if (!ret && !val) { + takedown_complete = true; + break; + } + } + + if (!retry) + break; + } + + /* Rely on mss_restart to clear out pending transactions on takedown failure */ + if (!takedown_complete) + dev_err(qproc->dev, "qchannel takedown failed\n"); +} + static void q6v5proc_halt_axi_port(struct q6v5 *qproc, struct regmap *halt_map, u32 offset) { - unsigned long timeout; unsigned int val; int ret; @@ -602,14 +1009,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc, regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); /* Wait for halt */ - timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS); - for (;;) { - ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val); - if (ret || val || time_after(jiffies, timeout)) - break; - - msleep(1); - } + regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val, + val, 1000, HALT_ACK_TIMEOUT_US); ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); if (ret || !val) @@ -619,27 +1020,55 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc, regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); } -static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) +static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw, + const char *fw_name) { unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; dma_addr_t phys; - int mdata_perm; + void *metadata; + u64 mdata_perm; int xferop_ret; + size_t size; void *ptr; int ret; - ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs); - if (!ptr) { - dev_err(qproc->dev, "failed to allocate mdt buffer\n"); - return -ENOMEM; + metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev); + if (IS_ERR(metadata)) + return PTR_ERR(metadata); + + if (qproc->mdata_phys) { + if (size > qproc->mdata_size) { + ret = -EINVAL; + dev_err(qproc->dev, "metadata size outside memory range\n"); + goto free_metadata; + } + + phys = qproc->mdata_phys; + ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC); + if (!ptr) { + ret = -EBUSY; + dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", + &qproc->mdata_phys, size); + goto free_metadata; + } + } else { + ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); + if (!ptr) { + ret = -ENOMEM; + dev_err(qproc->dev, "failed to allocate mdt buffer\n"); + goto free_metadata; + } } - memcpy(ptr, fw->data, fw->size); + memcpy(ptr, metadata, size); + + if (qproc->mdata_phys) + memunmap(ptr); /* Hypervisor mapping to access metadata by modem */ mdata_perm = BIT(QCOM_SCM_VMID_HLOS); - ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, - true, phys, fw->size); + ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true, + phys, size); if (ret) { dev_err(qproc->dev, "assigning Q6 access to metadata failed: %d\n", ret); @@ -657,14 +1086,17 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); /* Metadata authentication done, remove modem access */ - xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, - false, phys, fw->size); + xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false, + phys, size); if (xferop_ret) dev_warn(qproc->dev, "mdt buffer not reclaimed system may become unstable\n"); 
free_dma_attrs: - dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs); + if (!qproc->mdata_phys) + dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); +free_metadata: + kfree(metadata); return ret < 0 ? ret : 0; } @@ -687,14 +1119,30 @@ static int q6v5_mba_load(struct q6v5 *qproc) { int ret; int xfermemop_ret; + bool mba_load_err = false; + + ret = qcom_q6v5_prepare(&qproc->q6v5); + if (ret) + return ret; + + ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); + if (ret < 0) { + dev_err(qproc->dev, "failed to enable proxy power domains\n"); + goto disable_irqs; + } - qcom_q6v5_prepare(&qproc->q6v5); + ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs, + qproc->fallback_proxy_reg_count); + if (ret) { + dev_err(qproc->dev, "failed to enable fallback proxy supplies\n"); + goto disable_proxy_pds; + } ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, qproc->proxy_reg_count); if (ret) { dev_err(qproc->dev, "failed to enable proxy supplies\n"); - goto disable_irqs; + goto disable_fallback_proxy_reg; } ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, @@ -711,11 +1159,17 @@ static int q6v5_mba_load(struct q6v5 *qproc) goto disable_proxy_clk; } + if (qproc->has_ext_bhs_reg) { + ret = q6v5_external_bhs_enable(qproc); + if (ret < 0) + goto disable_vdd; + } + ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, qproc->reset_clk_count); if (ret) { dev_err(qproc->dev, "failed to enable reset clocks\n"); - goto disable_vdd; + goto disable_ext_bhs; } ret = q6v5_reset_deassert(qproc); @@ -731,8 +1185,25 @@ static int q6v5_mba_load(struct q6v5 *qproc) goto assert_reset; } + ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi); + if (ret) { + dev_err(qproc->dev, "failed to enable axi bridge\n"); + goto disable_active_clks; + } + + /* + * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide + * the Q6 access to this region. 
+ */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, + qproc->mpss_phys, qproc->mpss_size); + if (ret) { + dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret); + goto disable_active_clks; + } + /* Assign MBA image access in DDR to q6 */ - ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true, qproc->mba_phys, qproc->mba_size); if (ret) { dev_err(qproc->dev, @@ -740,7 +1211,14 @@ static int q6v5_mba_load(struct q6v5 *qproc) goto disable_active_clks; } + if (qproc->has_mba_logs) + qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE); + writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); + if (qproc->dp_size) { + writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG); + writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + } ret = q6v5proc_reset(qproc); if (ret) @@ -762,16 +1240,23 @@ static int q6v5_mba_load(struct q6v5 *qproc) halt_axi_ports: q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); + if (qproc->has_vq6) + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); - + q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm); + q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx); + q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi); + mba_load_err = true; reclaim_mba: - xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, - qproc->mba_phys, + xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, + false, qproc->mba_phys, qproc->mba_size); if (xfermemop_ret) { dev_err(qproc->dev, "Failed to reclaim mba buffer, system may become unstable\n"); + } else if (mba_load_err) { + q6v5_dump_mba_logs(qproc); } disable_active_clks: @@ -782,6 +1267,9 @@ assert_reset: disable_reset_clks: q6v5_clk_disable(qproc->dev, qproc->reset_clks, qproc->reset_clk_count); +disable_ext_bhs: + if (qproc->has_ext_bhs_reg) + q6v5_external_bhs_disable(qproc); disable_vdd: q6v5_regulator_disable(qproc, qproc->active_regs, qproc->active_reg_count); @@ -791,6 +1279,11 @@ disable_proxy_clk: disable_proxy_reg: q6v5_regulator_disable(qproc, qproc->proxy_regs, qproc->proxy_reg_count); +disable_fallback_proxy_reg: + q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs, + qproc->fallback_proxy_reg_count); +disable_proxy_pds: + q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); disable_irqs: qcom_q6v5_unprepare(&qproc->q6v5); @@ -803,8 +1296,11 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) u32 val; qproc->dump_mba_loaded = false; + qproc->dp_size = 0; q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); + if (qproc->has_vq6) + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); if (qproc->version == MSS_MSM8996) { @@ -817,10 +1313,23 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); } - ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, - false, qproc->mpss_phys, - qproc->mpss_size); - WARN_ON(ret); + if (qproc->has_ext_cntl_regs) { + regmap_write(qproc->conn_map, qproc->rscc_disable, 1); + + ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val, + !val, 1, 
Q6SS_CBCR_TIMEOUT_US); + if (ret) + dev_err(qproc->dev, "failed to enable axim1 clock\n"); + + ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val, + !val, 1, Q6SS_CBCR_TIMEOUT_US); + if (ret) + dev_err(qproc->dev, "failed to enable crypto clock\n"); + } + + q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm); + q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx); + q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi); q6v5_reset_assert(qproc); @@ -828,26 +1337,49 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) qproc->reset_clk_count); q6v5_clk_disable(qproc->dev, qproc->active_clks, qproc->active_clk_count); + if (qproc->has_ext_bhs_reg) + q6v5_external_bhs_disable(qproc); q6v5_regulator_disable(qproc, qproc->active_regs, qproc->active_reg_count); /* In case of failure or coredump scenario where reclaiming MBA memory * could not happen reclaim it here. */ - ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, qproc->mba_size); WARN_ON(ret); ret = qcom_q6v5_unprepare(&qproc->q6v5); if (ret) { + q6v5_pds_disable(qproc, qproc->proxy_pds, + qproc->proxy_pd_count); q6v5_clk_disable(qproc->dev, qproc->proxy_clks, qproc->proxy_clk_count); + q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs, + qproc->fallback_proxy_reg_count); q6v5_regulator_disable(qproc, qproc->proxy_regs, qproc->proxy_reg_count); } } +static int q6v5_reload_mba(struct rproc *rproc) +{ + struct q6v5 *qproc = rproc->priv; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, rproc->firmware, qproc->dev); + if (ret < 0) + return ret; + + q6v5_load(rproc, fw); + ret = q6v5_mba_load(qproc); + release_firmware(fw); + + return ret; +} + static int q6v5_mpss_load(struct q6v5 *qproc) { const struct elf32_phdr *phdrs; @@ -859,24 +1391,34 @@ static int q6v5_mpss_load(struct q6v5 *qproc) phys_addr_t boot_addr; phys_addr_t min_addr = PHYS_ADDR_MAX; phys_addr_t max_addr = 0; + u32 code_length; bool relocate = false; - char seg_name[10]; + char *fw_name; + size_t fw_name_len; ssize_t offset; size_t size = 0; void *ptr; int ret; int i; - ret = request_firmware(&fw, "modem.mdt", qproc->dev); + fw_name_len = strlen(qproc->hexagon_mdt_image); + if (fw_name_len <= 4) + return -EINVAL; + + fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); + if (!fw_name) + return -ENOMEM; + + ret = request_firmware(&fw, fw_name, qproc->dev); if (ret < 0) { - dev_err(qproc->dev, "unable to load modem.mdt\n"); - return ret; + dev_err(qproc->dev, "unable to load %s\n", fw_name); + goto out; } /* Initialize the RMB validator */ writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); - ret = q6v5_mpss_init_image(qproc, fw); + ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image); if (ret) goto release_firmware; @@ -899,6 +1441,32 @@ static int q6v5_mpss_load(struct q6v5 *qproc) max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); } + if (qproc->version == MSS_MSM8953) { + ret = qcom_scm_pas_mem_setup(MPSS_PAS_ID, qproc->mpss_phys, qproc->mpss_size); + if (ret) { + dev_err(qproc->dev, + "setting up mpss memory failed: %d\n", ret); + goto release_firmware; + } + } + + /* + * In case of a modem subsystem restart on secure devices, the modem + * memory can be reclaimed only after MBA is loaded. 
+ */ + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false, + qproc->mpss_phys, qproc->mpss_size); + + /* Share ownership between Linux and MSS, during segment loading */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true, + qproc->mpss_phys, qproc->mpss_size); + if (ret) { + dev_err(qproc->dev, + "assigning Q6 access to mpss memory failed: %d\n", ret); + ret = -EAGAIN; + goto release_firmware; + } + mpss_reloc = relocate ? min_addr : qproc->mpss_phys; qproc->mpss_reloc = mpss_reloc; /* Load firmware segments */ @@ -915,17 +1483,54 @@ static int q6v5_mpss_load(struct q6v5 *qproc) goto release_firmware; } - ptr = qproc->mpss_region + offset; + if (phdr->p_filesz > phdr->p_memsz) { + dev_err(qproc->dev, + "refusing to load segment %d with p_filesz > p_memsz\n", + i); + ret = -EINVAL; + goto release_firmware; + } - if (phdr->p_filesz) { - snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i); - ret = request_firmware(&seg_fw, seg_name, qproc->dev); + ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC); + if (!ptr) { + dev_err(qproc->dev, + "unable to map memory region: %pa+%zx-%x\n", + &qproc->mpss_phys, offset, phdr->p_memsz); + goto release_firmware; + } + + if (phdr->p_filesz && phdr->p_offset < fw->size) { + /* Firmware is large enough to be non-split */ + if (phdr->p_offset + phdr->p_filesz > fw->size) { + dev_err(qproc->dev, + "failed to load segment %d from truncated file %s\n", + i, fw_name); + ret = -EINVAL; + memunmap(ptr); + goto release_firmware; + } + + memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); + } else if (phdr->p_filesz) { + /* Replace "xxx.xxx" with "xxx.bxx" */ + sprintf(fw_name + fw_name_len - 3, "b%02d", i); + ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, + ptr, phdr->p_filesz); if (ret) { - dev_err(qproc->dev, "failed to load %s\n", seg_name); + dev_err(qproc->dev, "failed to load %s\n", fw_name); + memunmap(ptr); goto release_firmware; } - memcpy(ptr, seg_fw->data, seg_fw->size); + if (seg_fw->size != phdr->p_filesz) { + dev_err(qproc->dev, + "failed to load segment %d from truncated file %s\n", + i, fw_name); + ret = -EINVAL; + release_firmware(seg_fw); + memunmap(ptr); + goto release_firmware; + } release_firmware(seg_fw); } @@ -934,11 +1539,27 @@ static int q6v5_mpss_load(struct q6v5 *qproc) memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); } + memunmap(ptr); size += phdr->p_memsz; + + code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + if (!code_length) { + boot_addr = relocate ? qproc->mpss_phys : min_addr; + writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); + writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); + } + writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + + ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); + if (ret < 0) { + dev_err(qproc->dev, "MPSS authentication failed: %d\n", + ret); + goto release_firmware; + } } /* Transfer ownership of modem ddr region to q6 */ - ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, qproc->mpss_phys, qproc->mpss_size); if (ret) { dev_err(qproc->dev, @@ -947,53 +1568,71 @@ static int q6v5_mpss_load(struct q6v5 *qproc) goto release_firmware; } - boot_addr = relocate ? 
qproc->mpss_phys : min_addr; - writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); - writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); - writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); - ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); if (ret == -ETIMEDOUT) dev_err(qproc->dev, "MPSS authentication timed out\n"); else if (ret < 0) dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); + qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); + release_firmware: release_firmware(fw); +out: + kfree(fw_name); return ret < 0 ? ret : 0; } static void qcom_q6v5_dump_segment(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest) + void *dest, size_t cp_offset, size_t size) { int ret = 0; struct q6v5 *qproc = rproc->priv; - unsigned long mask = BIT((unsigned long)segment->priv); - void *ptr = rproc_da_to_va(rproc, segment->da, segment->size); + int offset = segment->da - qproc->mpss_reloc; + void *ptr = NULL; /* Unlock mba before copying segments */ - if (!qproc->dump_mba_loaded) - ret = q6v5_mba_load(qproc); + if (!qproc->dump_mba_loaded) { + ret = q6v5_reload_mba(rproc); + if (!ret) { + /* Reset ownership back to Linux to copy segments */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, + true, false, + qproc->mpss_phys, + qproc->mpss_size); + } + } - if (!ptr || ret) - memset(dest, 0xff, segment->size); - else - memcpy(dest, ptr, segment->size); + if (!ret) + ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC); + + if (ptr) { + memcpy(dest, ptr, size); + memunmap(ptr); + } else { + memset(dest, 0xff, size); + } - qproc->dump_segment_mask |= mask; + qproc->current_dump_size += size; /* Reclaim mba after copying segments */ - if (qproc->dump_segment_mask == qproc->dump_complete_mask) { - if (qproc->dump_mba_loaded) + if (qproc->current_dump_size == qproc->total_dump_size) { + if (qproc->dump_mba_loaded) { + /* Try to reset ownership back to Q6 */ + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, + false, true, + qproc->mpss_phys, + qproc->mpss_size); q6v5_mba_reclaim(qproc); + } } } static int q6v5_start(struct rproc *rproc) { - struct q6v5 *qproc = (struct q6v5 *)rproc->priv; + struct q6v5 *qproc = rproc->priv; int xfermemop_ret; int ret; @@ -1001,7 +1640,8 @@ static int q6v5_start(struct rproc *rproc) if (ret) return ret; - dev_info(qproc->dev, "MBA booted, loading mpss\n"); + dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n", + qproc->dp_size ? 
"" : "out"); ret = q6v5_mpss_load(qproc); if (ret) @@ -1013,37 +1653,31 @@ static int q6v5_start(struct rproc *rproc) goto reclaim_mpss; } - xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, - qproc->mba_phys, + xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, + false, qproc->mba_phys, qproc->mba_size); if (xfermemop_ret) dev_err(qproc->dev, "Failed to reclaim mba buffer system may become unstable\n"); /* Reset Dump Segment Mask */ - qproc->dump_segment_mask = 0; - qproc->running = true; + qproc->current_dump_size = 0; return 0; reclaim_mpss: - xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, - false, qproc->mpss_phys, - qproc->mpss_size); - WARN_ON(xfermemop_ret); q6v5_mba_reclaim(qproc); + q6v5_dump_mba_logs(qproc); return ret; } static int q6v5_stop(struct rproc *rproc) { - struct q6v5 *qproc = (struct q6v5 *)rproc->priv; + struct q6v5 *qproc = rproc->priv; int ret; - qproc->running = false; - - ret = qcom_q6v5_request_stop(&qproc->q6v5); + ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon); if (ret == -ETIMEDOUT) dev_err(qproc->dev, "timed out on wait\n"); @@ -1052,18 +1686,6 @@ static int q6v5_stop(struct rproc *rproc) return 0; } -static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len) -{ - struct q6v5 *qproc = rproc->priv; - int offset; - - offset = da - qproc->mpss_reloc; - if (offset < 0 || offset + len > qproc->mpss_size) - return NULL; - - return qproc->mpss_region + offset; -} - static int qcom_q6v5_register_dump_segments(struct rproc *rproc, const struct firmware *mba_fw) { @@ -1075,15 +1697,18 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc, unsigned long i; int ret; - ret = request_firmware(&fw, "modem.mdt", qproc->dev); + ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev); if (ret < 0) { - dev_err(qproc->dev, "unable to load modem.mdt\n"); + dev_err(qproc->dev, "unable to load %s\n", + qproc->hexagon_mdt_image); return ret; } + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + ehdr = (struct elf32_hdr *)fw->data; phdrs = (struct elf32_phdr *)(ehdr + 1); - qproc->dump_complete_mask = 0; + qproc->total_dump_size = 0; for (i = 0; i < ehdr->e_phnum; i++) { phdr = &phdrs[i]; @@ -1094,23 +1719,30 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc, ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr, phdr->p_memsz, qcom_q6v5_dump_segment, - (void *)i); + NULL); if (ret) break; - qproc->dump_complete_mask |= BIT(i); + qproc->total_dump_size += phdr->p_memsz; } release_firmware(fw); return ret; } +static unsigned long q6v5_panic(struct rproc *rproc) +{ + struct q6v5 *qproc = rproc->priv; + + return qcom_q6v5_panic(&qproc->q6v5); +} + static const struct rproc_ops q6v5_ops = { .start = q6v5_start, .stop = q6v5_stop, - .da_to_va = q6v5_da_to_va, .parse_fw = qcom_q6v5_register_dump_segments, .load = q6v5_load, + .panic = q6v5_panic, }; static void qcom_msa_handover(struct qcom_q6v5 *q6v5) @@ -1121,26 +1753,30 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5) qproc->proxy_clk_count); q6v5_regulator_disable(qproc, qproc->proxy_regs, qproc->proxy_reg_count); + q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs, + qproc->fallback_proxy_reg_count); + q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); } static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) { struct of_phandle_args args; - struct resource *res; + int halt_cell_cnt = 3; int ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
"qdsp6"); - qproc->reg_base = devm_ioremap_resource(&pdev->dev, res); + qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6"); if (IS_ERR(qproc->reg_base)) return PTR_ERR(qproc->reg_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); - qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res); + qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb"); if (IS_ERR(qproc->rmb_base)) return PTR_ERR(qproc->rmb_base); + if (qproc->has_vq6) + halt_cell_cnt++; + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, - "qcom,halt-regs", 3, 0, &args); + "qcom,halt-regs", halt_cell_cnt, 0, &args); if (ret < 0) { dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); return -EINVAL; @@ -1155,6 +1791,86 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) qproc->halt_modem = args.args[1]; qproc->halt_nc = args.args[2]; + if (qproc->has_vq6) + qproc->halt_vq6 = args.args[3]; + + if (qproc->has_qaccept_regs) { + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,qaccept-regs", + 3, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse qaccept-regs\n"); + return -EINVAL; + } + + qproc->qaccept_mdm = args.args[0]; + qproc->qaccept_cx = args.args[1]; + qproc->qaccept_axi = args.args[2]; + } + + if (qproc->has_ext_bhs_reg) { + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,ext-bhs-reg", + 1, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse ext-bhs-reg index 0\n"); + return -EINVAL; + } + + qproc->conn_map = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(qproc->conn_map)) + return PTR_ERR(qproc->conn_map); + + qproc->ext_bhs = args.args[0]; + } + + if (qproc->has_ext_cntl_regs) { + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,ext-regs", + 2, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse ext-regs index 0\n"); + return -EINVAL; + } + + qproc->conn_map = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(qproc->conn_map)) + return PTR_ERR(qproc->conn_map); + + qproc->force_clk_on = args.args[0]; + qproc->rscc_disable = args.args[1]; + + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,ext-regs", + 2, 1, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse ext-regs index 1\n"); + return -EINVAL; + } + + qproc->axim1_clk_off = args.args[0]; + qproc->crypto_clk_off = args.args[1]; + } + + if (qproc->has_spare_reg) { + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,spare-regs", + 1, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse spare-regs\n"); + return -EINVAL; + } + + qproc->conn_map = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(qproc->conn_map)) + return PTR_ERR(qproc->conn_map); + + qproc->conn_box = args.args[0]; + } + return 0; } @@ -1168,17 +1884,66 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks, for (i = 0; clk_names[i]; i++) { clks[i] = devm_clk_get(dev, clk_names[i]); - if (IS_ERR(clks[i])) { - int rc = PTR_ERR(clks[i]); + if (IS_ERR(clks[i])) + return dev_err_probe(dev, PTR_ERR(clks[i]), + "Failed to get %s clock\n", + clk_names[i]); + } - if (rc != -EPROBE_DEFER) - dev_err(dev, "Failed to get %s clock\n", - clk_names[i]); - return rc; + return i; +} + +static int q6v5_pds_attach(struct device *dev, struct device **devs, + char **pd_names) +{ + size_t num_pds = 0; + int ret; + int i; + + if (!pd_names) + return 0; + + while (pd_names[num_pds]) + num_pds++; + + /* Handle 
single power domain */ + if (num_pds == 1 && dev->pm_domain) { + devs[0] = dev; + pm_runtime_enable(dev); + return 1; + } + + for (i = 0; i < num_pds; i++) { + devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); + if (IS_ERR_OR_NULL(devs[i])) { + ret = PTR_ERR(devs[i]) ? : -ENODATA; + goto unroll_attach; } } - return i; + return num_pds; + +unroll_attach: + for (i--; i >= 0; i--) + dev_pm_domain_detach(devs[i], false); + + return ret; +} + +static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, + size_t pd_count) +{ + struct device *dev = qproc->dev; + int i; + + /* Handle single power domain */ + if (pd_count == 1 && dev->pm_domain) { + pm_runtime_disable(dev); + return; + } + + for (i = 0; i < pd_count; i++) + dev_pm_domain_detach(pds[i], false); } static int q6v5_init_reset(struct q6v5 *qproc) @@ -1190,7 +1955,7 @@ static int q6v5_init_reset(struct q6v5 *qproc) return PTR_ERR(qproc->mss_restart); } - if (qproc->has_alt_reset) { + if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) { qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, "pdc_reset"); if (IS_ERR(qproc->pdc_reset)) { @@ -1205,54 +1970,69 @@ static int q6v5_init_reset(struct q6v5 *qproc) static int q6v5_alloc_memory_region(struct q6v5 *qproc) { struct device_node *child; - struct device_node *node; - struct resource r; + struct resource res; int ret; + /* + * In the absence of mba/mpss sub-child, extract the mba and mpss + * reserved memory regions from device's memory-region property. + */ child = of_get_child_by_name(qproc->dev->of_node, "mba"); - node = of_parse_phandle(child, "memory-region", 0); - ret = of_address_to_resource(node, 0, &r); + if (!child) { + ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 0, &res); + } else { + ret = of_reserved_mem_region_to_resource(child, 0, &res); + of_node_put(child); + } + if (ret) { dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } - of_node_put(node); - qproc->mba_phys = r.start; - qproc->mba_size = resource_size(&r); - qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); - if (!qproc->mba_region) { - dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", - &r.start, qproc->mba_size); - return -EBUSY; + qproc->mba_phys = res.start; + qproc->mba_size = resource_size(&res); + + if (!child) { + ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 1, &res); + } else { + child = of_get_child_by_name(qproc->dev->of_node, "mpss"); + ret = of_reserved_mem_region_to_resource(child, 0, &res); + of_node_put(child); } - child = of_get_child_by_name(qproc->dev->of_node, "mpss"); - node = of_parse_phandle(child, "memory-region", 0); - ret = of_address_to_resource(node, 0, &r); if (ret) { dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } - of_node_put(node); - qproc->mpss_phys = qproc->mpss_reloc = r.start; - qproc->mpss_size = resource_size(&r); - qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); - if (!qproc->mpss_region) { - dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", - &r.start, qproc->mpss_size); - return -EBUSY; + qproc->mpss_phys = qproc->mpss_reloc = res.start; + qproc->mpss_size = resource_size(&res); + + if (!child) { + ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 2, &res); + } else { + child = of_get_child_by_name(qproc->dev->of_node, "metadata"); + ret = of_reserved_mem_region_to_resource(child, 0, &res); + of_node_put(child); } + if (ret) + return 0; + + 
qproc->mdata_phys = res.start; + qproc->mdata_size = resource_size(&res); + return 0; } static int q6v5_probe(struct platform_device *pdev) { const struct rproc_hexagon_res *desc; + struct device_node *node; struct q6v5 *qproc; struct rproc *rproc; + const char *mba_image; int ret; desc = of_device_get_match_data(&pdev->dev); @@ -1262,113 +2042,259 @@ static int q6v5_probe(struct platform_device *pdev) if (desc->need_mem_protection && !qcom_scm_is_available()) return -EPROBE_DEFER; - rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, - desc->hexagon_mba_image, sizeof(*qproc)); + mba_image = desc->hexagon_mba_image; + ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", + 0, &mba_image); + if (ret < 0 && ret != -EINVAL) { + dev_err(&pdev->dev, "unable to read mba firmware-name\n"); + return ret; + } + + rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, + mba_image, sizeof(*qproc)); if (!rproc) { dev_err(&pdev->dev, "failed to allocate rproc\n"); return -ENOMEM; } - qproc = (struct q6v5 *)rproc->priv; + rproc->auto_boot = false; + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + qproc = rproc->priv; qproc->dev = &pdev->dev; qproc->rproc = rproc; + qproc->hexagon_mdt_image = "modem.mdt"; + ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", + 1, &qproc->hexagon_mdt_image); + if (ret < 0 && ret != -EINVAL) { + dev_err(&pdev->dev, "unable to read mpss firmware-name\n"); + return ret; + } + platform_set_drvdata(pdev, qproc); + qproc->has_qaccept_regs = desc->has_qaccept_regs; + qproc->has_ext_bhs_reg = desc->has_ext_bhs_reg; + qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs; + qproc->has_vq6 = desc->has_vq6; + qproc->has_spare_reg = desc->has_spare_reg; ret = q6v5_init_mem(qproc, pdev); if (ret) - goto free_rproc; + return ret; ret = q6v5_alloc_memory_region(qproc); if (ret) - goto free_rproc; + return ret; ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, desc->proxy_clk_names); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); - goto free_rproc; - } + if (ret < 0) + return ret; qproc->proxy_clk_count = ret; ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, desc->reset_clk_names); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get reset clocks.\n"); - goto free_rproc; - } + if (ret < 0) + return ret; qproc->reset_clk_count = ret; ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, desc->active_clk_names); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get active clocks.\n"); - goto free_rproc; - } + if (ret < 0) + return ret; qproc->active_clk_count = ret; ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, desc->proxy_supply); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); - goto free_rproc; - } + if (ret < 0) + return ret; qproc->proxy_reg_count = ret; ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, desc->active_supply); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get active regulators.\n"); - goto free_rproc; - } + if (ret < 0) + return ret; qproc->active_reg_count = ret; + ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, + desc->proxy_pd_names); + /* Fallback to regulators for old device trees */ + if (ret == -ENODATA && desc->fallback_proxy_supply) { + ret = q6v5_regulator_init(&pdev->dev, + qproc->fallback_proxy_regs, + desc->fallback_proxy_supply); + if (ret < 0) + return ret; + qproc->fallback_proxy_reg_count = ret; + } else if (ret < 0) { + dev_err(&pdev->dev, "Failed to init power domains\n"); + return ret; + } else { + 
+		qproc->proxy_pd_count = ret;
+	}
+
 	qproc->has_alt_reset = desc->has_alt_reset;
 	ret = q6v5_init_reset(qproc);
 	if (ret)
-		goto free_rproc;
+		goto detach_proxy_pds;
 
 	qproc->version = desc->version;
 	qproc->need_mem_protection = desc->need_mem_protection;
+	qproc->has_mba_logs = desc->has_mba_logs;
 
-	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
+	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
 			     qcom_msa_handover);
 	if (ret)
-		goto free_rproc;
+		goto detach_proxy_pds;
 
 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
-	qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
+	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
+	qcom_add_pdm_subdev(rproc, &qproc->pdm_subdev);
 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
+	if (IS_ERR(qproc->sysmon)) {
+		ret = PTR_ERR(qproc->sysmon);
+		goto remove_subdevs;
+	}
 
 	ret = rproc_add(rproc);
 	if (ret)
-		goto free_rproc;
+		goto remove_sysmon_subdev;
+
+	node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
+	qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
+	of_node_put(node);
 
 	return 0;
 
-free_rproc:
-	rproc_free(rproc);
+remove_sysmon_subdev:
+	qcom_remove_sysmon_subdev(qproc->sysmon);
+remove_subdevs:
+	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
+detach_proxy_pds:
+	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
 
 	return ret;
 }
 
-static int q6v5_remove(struct platform_device *pdev)
+static void q6v5_remove(struct platform_device *pdev)
 {
 	struct q6v5 *qproc = platform_get_drvdata(pdev);
+	struct rproc *rproc = qproc->rproc;
 
-	rproc_del(qproc->rproc);
+	if (qproc->bam_dmux)
+		of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
+	rproc_del(rproc);
 
+	qcom_q6v5_deinit(&qproc->q6v5);
 	qcom_remove_sysmon_subdev(qproc->sysmon);
-	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
-	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
-	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
-	rproc_free(qproc->rproc);
+	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+	qcom_remove_pdm_subdev(rproc, &qproc->pdm_subdev);
+	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
 
-	return 0;
+	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
 }
 
+static const struct rproc_hexagon_res sc7180_mss = {
+	.hexagon_mba_image = "mba.mbn",
+	.proxy_clk_names = (char*[]){
+		"xo",
+		NULL
+	},
+	.reset_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"snoc_axi",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"mnoc_axi",
+		"nav",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		"mx",
+		"mss",
+		NULL
+	},
+	.need_mem_protection = true,
+	.has_alt_reset = false,
+	.has_mba_logs = true,
+	.has_spare_reg = true,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_SC7180,
+};
+
+static const struct rproc_hexagon_res sc7280_mss = {
+	.hexagon_mba_image = "mba.mbn",
+	.proxy_clk_names = (char*[]){
+		"xo",
+		"pka",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"iface",
+		"offline",
+		"snoc_axi",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		"mss",
+		NULL
+	},
+	.need_mem_protection = true,
+	.has_alt_reset = false,
+	.has_mba_logs = true,
+	.has_spare_reg = false,
+	.has_qaccept_regs = true,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = true,
+	.has_vq6 = true,
+	.version = MSS_SC7280,
+};
+
+static const struct rproc_hexagon_res sdm660_mss = {
+	.hexagon_mba_image = "mba.mbn",
+	.proxy_clk_names = (char*[]){
+		"xo",
+		"qdss",
+		"mem",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"gpll0_mss",
+		"mnoc_axi",
+		"snoc_axi",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		"mx",
+		NULL
+	},
+	.need_mem_protection = true,
+	.has_alt_reset = false,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_SDM660,
+};
+
 static const struct rproc_hexagon_res sdm845_mss = {
 	.hexagon_mba_image = "mba.mbn",
 	.proxy_clk_names = (char*[]){
@@ -1388,34 +2314,140 @@ static const struct rproc_hexagon_res sdm845_mss = {
 		"mnoc_axi",
 		NULL
 	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		"mx",
+		"mss",
+		NULL
+	},
 	.need_mem_protection = true,
 	.has_alt_reset = true,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
 	.version = MSS_SDM845,
 };
 
+static const struct rproc_hexagon_res msm8998_mss = {
+	.hexagon_mba_image = "mba.mbn",
+	.proxy_clk_names = (char*[]){
+		"xo",
+		"qdss",
+		"mem",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"gpll0_mss",
+		"mnoc_axi",
+		"snoc_axi",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		"mx",
+		NULL
+	},
+	.need_mem_protection = true,
+	.has_alt_reset = false,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_MSM8998,
+};
+
 static const struct rproc_hexagon_res msm8996_mss = {
 	.hexagon_mba_image = "mba.mbn",
+	.proxy_supply = (struct qcom_mss_reg_res[]) {
+		{
+			.supply = "pll",
+			.uA = 100000,
+		},
+		{}
+	},
 	.proxy_clk_names = (char*[]){
 		"xo",
-		"pnoc",
+		"qdss",
 		NULL
 	},
 	.active_clk_names = (char*[]){
 		"iface",
 		"bus",
 		"mem",
-		"gpll0_mss_clk",
+		"gpll0_mss",
+		"snoc_axi",
+		"mnoc_axi",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"mx",
+		"cx",
 		NULL
 	},
 	.need_mem_protection = true,
 	.has_alt_reset = false,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
 	.version = MSS_MSM8996,
 };
 
+static const struct rproc_hexagon_res msm8909_mss = {
+	.hexagon_mba_image = "mba.mbn",
+	.proxy_supply = (struct qcom_mss_reg_res[]) {
+		{
+			.supply = "pll",
+			.uA = 100000,
+		},
+		{}
+	},
+	.proxy_clk_names = (char*[]){
+		"xo",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"mem",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"mx",
+		"cx",
+		NULL
+	},
+	.need_mem_protection = false,
+	.has_alt_reset = false,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_MSM8909,
+};
+
 static const struct rproc_hexagon_res msm8916_mss = {
 	.hexagon_mba_image = "mba.mbn",
 	.proxy_supply = (struct qcom_mss_reg_res[]) {
 		{
+			.supply = "pll",
+			.uA = 100000,
+		},
+		{}
+	},
+	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+		{
 			.supply = "mx",
 			.uV = 1050000,
 		},
@@ -1423,6 +2455,37 @@ static const struct rproc_hexagon_res msm8916_mss = {
 			.supply = "cx",
 			.uA = 100000,
 		},
+		{}
+	},
+	.proxy_clk_names = (char*[]){
+		"xo",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"mem",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"mx",
+		"cx",
+		NULL
+	},
+	.need_mem_protection = false,
+	.has_alt_reset = false,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_MSM8916,
+};
+
+static const struct rproc_hexagon_res msm8953_mss = {
+	.hexagon_mba_image = "mba.mbn",
+	.proxy_supply = (struct qcom_mss_reg_res[]) {
 		{
 			.supply = "pll",
 			.uA = 100000,
@@ -1439,26 +2502,125 @@ static const struct rproc_hexagon_res msm8916_mss = {
 		"mem",
 		NULL
 	},
+	.proxy_pd_names = (char*[]) {
+		"cx",
+		"mx",
+		"mss",
+		NULL
+	},
 	.need_mem_protection = false,
 	.has_alt_reset = false,
-	.version = MSS_MSM8916,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_MSM8953,
 };
 
 static const struct rproc_hexagon_res msm8974_mss = {
 	.hexagon_mba_image = "mba.b00",
 	.proxy_supply = (struct qcom_mss_reg_res[]) {
 		{
+			.supply = "pll",
+			.uA = 100000,
+		},
+		{
 			.supply = "mx",
 			.uV = 1050000,
 		},
+		{}
+	},
+	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
 		{
 			.supply = "cx",
 			.uA = 100000,
 		},
+		{}
+	},
+	.active_supply = (struct qcom_mss_reg_res[]) {
+		{
+			.supply = "mss",
+			.uV = 1050000,
+			.uA = 100000,
+		},
+		{}
+	},
+	.proxy_clk_names = (char*[]){
+		"xo",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"mem",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		NULL
+	},
+	.need_mem_protection = false,
+	.has_alt_reset = false,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_MSM8974,
+};
+
+static const struct rproc_hexagon_res msm8226_mss = {
+	.hexagon_mba_image = "mba.b00",
+	.proxy_supply = (struct qcom_mss_reg_res[]) {
+		{
+			.supply = "pll",
+			.uA = 100000,
+		},
+		{
+			.supply = "mx",
+			.uV = 1050000,
+		},
+		{}
+	},
+	.proxy_clk_names = (char*[]){
+		"xo",
+		NULL
+	},
+	.active_clk_names = (char*[]){
+		"iface",
+		"bus",
+		"mem",
+		NULL
+	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		NULL
+	},
+	.need_mem_protection = false,
+	.has_alt_reset = false,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = true,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_MSM8226,
+};
+
+static const struct rproc_hexagon_res msm8926_mss = {
+	.hexagon_mba_image = "mba.b00",
+	.proxy_supply = (struct qcom_mss_reg_res[]) {
 		{
 			.supply = "pll",
 			.uA = 100000,
 		},
+		{
+			.supply = "mx",
+			.uV = 1050000,
+		},
 		{}
 	},
 	.active_supply = (struct qcom_mss_reg_res[]) {
@@ -1479,16 +2641,34 @@ static const struct rproc_hexagon_res msm8974_mss = {
 		"mem",
 		NULL
 	},
+	.proxy_pd_names = (char*[]){
+		"cx",
+		NULL
+	},
 	.need_mem_protection = false,
 	.has_alt_reset = false,
-	.version = MSS_MSM8974,
+	.has_mba_logs = false,
+	.has_spare_reg = false,
+	.has_qaccept_regs = false,
+	.has_ext_bhs_reg = false,
+	.has_ext_cntl_regs = false,
+	.has_vq6 = false,
+	.version = MSS_MSM8926,
 };
 
 static const struct of_device_id q6v5_of_match[] = {
 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
+	{ .compatible = "qcom,msm8226-mss-pil", .data = &msm8226_mss},
+	{ .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss},
 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
+	{ .compatible = "qcom,msm8926-mss-pil", .data = &msm8926_mss},
+	{ .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss},
 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
+	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
+	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
+	{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
+	{ .compatible = "qcom,sdm660-mss-pil", .data = &sdm660_mss},
 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
 	{ },
 };
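Illustrative sketch (not part of the diff above): how one of the per-SoC rproc_hexagon_res descriptors is looked up from a match table such as q6v5_of_match at probe time, using the usual platform-driver pattern. The function name example_probe, the trimmed struct fields and the use of the generic device_get_match_data() helper are assumptions for illustration only; the real driver defines the full descriptor and its own probe in qcom_q6v5_mss.c.

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Reduced stand-in for the driver's rproc_hexagon_res; only a few of the
 * per-SoC fields shown in the diff are kept here for illustration. */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	bool need_mem_protection;
	bool has_mba_logs;
};

static int example_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;

	/* Fetches the .data pointer of the matched of_device_id entry,
	 * e.g. &sc7180_mss for a "qcom,sc7180-mss-pil" node. */
	desc = device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* Per-SoC booleans such as desc->has_mba_logs then gate optional
	 * behaviour (MBA log dumping, memory protection, and so on). */
	return 0;
}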
