author    Arnd Bergmann <arnd@arndb.de>    2019-02-15 17:58:41 +0100
committer Arnd Bergmann <arnd@arndb.de>    2019-02-15 18:00:05 +0100
commit    405bcfff172db863b7c4b63e0e0ef8508966c90a (patch)
tree      67b34d629c465fd51f863f2029b0faf03e3a3a99 /drivers
parent    dea73a34e0edcff6a56f238748b49fc88046b9a0 (diff)
parent    4e2256d31f0f24107c36dae35a1d84dff0fced30 (diff)
Merge tag 'qcom-drivers-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux into arm/drivers
Qualcomm ARM Based Driver Updates for v5.1

* Add Qualcomm RPMh power domain driver and related changes
* Fix issues with sleep/wake sets and batch API in RPMh
* Update MAINTAINERS Qualcomm entry
* Fixup RMTFS-mem sysfs and uevents
* Fix error handling in GSBI
* Add SMD-RPM compatible entry for SDM660

* tag 'qcom-drivers-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux:
  soc: qcom: smd-rpm: Add sdm660 compatible
  soc: qcom: gsbi: Fix error handling in gsbi_probe()
  soc: qcom: rpmh: Avoid accessing freed memory from batch API
  drivers: qcom: rpmh: avoid sending sleep/wake sets immediately
  soc: qcom: rmtfs-mem: Make sysfs attributes world-readable
  soc: qcom: rmtfs-mem: Add class to enable uevents
  soc: qcom: update config dependencies for QCOM_RPMPD
  soc: qcom: rpmpd: Drop family A RPM dependency
  MAINTAINERS: update list of qcom drivers
  soc: qcom: rpmhpd: Mark mx as a parent for cx
  soc: qcom: rpmhpd: Add RPMh power domain driver
  soc: qcom: rpmpd: Add support for get/set performance state
  soc: qcom: rpmpd: Add a Power domain driver to model corners
  dt-bindings: power: Add qcom rpm power domain driver bindings
  OPP: Add support for parsing the 'opp-level' property
  dt-bindings: opp: Introduce opp-level bindings

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/opp/core.c            18
-rw-r--r--  drivers/opp/of.c               2
-rw-r--r--  drivers/opp/opp.h              2
-rw-r--r--  drivers/soc/qcom/Kconfig      18
-rw-r--r--  drivers/soc/qcom/Makefile      2
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c   7
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c  32
-rw-r--r--  drivers/soc/qcom/rpmh.c       37
-rw-r--r--  drivers/soc/qcom/rpmhpd.c    406
-rw-r--r--  drivers/soc/qcom/rpmpd.c     315
-rw-r--r--  drivers/soc/qcom/smd-rpm.c     1
11 files changed, 815 insertions, 25 deletions
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 18f1639dbc4a..e06a0ab05ad6 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -131,6 +131,24 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
+ * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
+ * @opp: opp for which level value has to be returned for
+ *
+ * Return: level read from device tree corresponding to the opp, else
+ * return 0.
+ */
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ return opp->level;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
+
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
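A minimal sketch of how a caller might use the new dev_pm_opp_get_level() helper added above; the device pointer, target frequency and function name are hypothetical, and real consumers would typically let the OPP/genpd core perform this translation through required-opps rather than open-coding it:

#include <linux/err.h>
#include <linux/pm_opp.h>

/*
 * Illustrative only: look up the OPP covering target_hz and read the
 * performance level parsed from its "opp-level" property. Returns 0 if
 * the OPP carries no level, matching dev_pm_opp_get_level() above.
 */
static unsigned int example_level_for_freq(struct device *dev,
					   unsigned long target_hz)
{
	struct dev_pm_opp *opp;
	unsigned int level;

	opp = dev_pm_opp_find_freq_ceil(dev, &target_hz);
	if (IS_ERR(opp))
		return 0;

	level = dev_pm_opp_get_level(opp);
	dev_pm_opp_put(opp);

	return level;
}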
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 06f0f632ec47..1779f2c93291 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -594,6 +594,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
new_opp->rate = (unsigned long)rate;
}
+ of_property_read_u32(np, "opp-level", &new_opp->level);
+
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index e24d81497375..4458175aa661 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -60,6 +60,7 @@ extern struct list_head opp_tables;
* @suspend: true if suspend OPP
* @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz
+ * @level: Performance level
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
@@ -80,6 +81,7 @@ struct dev_pm_opp {
bool suspend;
unsigned int pstate;
unsigned long rate;
+ unsigned int level;
struct dev_pm_opp_supply *supplies;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index fcbf8a2e4080..1ee298f6bf17 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -98,6 +98,24 @@ config QCOM_RPMH
of hardware components aggregate requests for these resources and
help apply the aggregated state on the resource.
+config QCOM_RPMHPD
+ bool "Qualcomm RPMh Power domain driver"
+ depends on QCOM_RPMH && QCOM_COMMAND_DB
+ help
+ QCOM RPMh Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPMh which then translates it into corresponding voltage
+ for the voltage rail.
+
+config QCOM_RPMPD
+ bool "Qualcomm RPM Power domain driver"
+ depends on QCOM_SMD_RPM=y
+ help
+ QCOM RPM Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPM which then translates it into corresponding voltage
+ for the voltage rail.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f25b54cd6cf8..ffe519b0cb66 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -21,3 +21,5 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
+obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 09c669e70d63..038abc377fdb 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
- int i;
+ int i, ret;
u32 mask, gsbi_num;
const struct crci_config *config = NULL;
@@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gsbi);
- return of_platform_populate(node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ clk_disable_unprepare(gsbi->hclk);
+ return ret;
}
static int gsbi_remove(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 97bb5989aa21..7200d762a951 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -45,9 +45,9 @@ static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
char *buf);
-static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);
static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
@@ -132,6 +132,11 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
+static struct class rmtfs_class = {
+ .owner = THIS_MODULE,
+ .name = "rmtfs",
+};
+
static const struct file_operations qcom_rmtfs_mem_fops = {
.owner = THIS_MODULE,
.open = qcom_rmtfs_mem_open,
@@ -199,6 +204,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.class = &rmtfs_class;
rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
@@ -277,32 +283,42 @@ static struct platform_driver qcom_rmtfs_mem_driver = {
},
};
-static int qcom_rmtfs_mem_init(void)
+static int __init qcom_rmtfs_mem_init(void)
{
int ret;
+ ret = class_register(&rmtfs_class);
+ if (ret)
+ return ret;
+
ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
- return ret;
+ goto unregister_class;
}
ret = platform_driver_register(&qcom_rmtfs_mem_driver);
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
- unregister_chrdev_region(qcom_rmtfs_mem_major,
- QCOM_RMTFS_MEM_DEV_MAX);
+ goto unregister_chrdev;
}
+ return 0;
+
+unregister_chrdev:
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+unregister_class:
+ class_unregister(&rmtfs_class);
return ret;
}
module_init(qcom_rmtfs_mem_init);
-static void qcom_rmtfs_mem_exit(void)
+static void __exit qcom_rmtfs_mem_exit(void)
{
platform_driver_unregister(&qcom_rmtfs_mem_driver);
unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+ class_unregister(&rmtfs_class);
}
module_exit(qcom_rmtfs_mem_exit);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index c7beb6841289..035091fd44b8 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
msg);
struct completion *compl = rpm_msg->completion;
+ bool free = rpm_msg->needs_free;
rpm_msg->err = r;
@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
complete(compl);
exit:
- if (rpm_msg->needs_free)
+ if (free)
kfree(rpm_msg);
}
@@ -192,9 +193,8 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
- ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
- &rpm_msg->msg);
/* Clean up our call by spoofing tx_done */
+ ret = 0;
rpmh_tx_done(&rpm_msg->msg, ret);
}
@@ -348,11 +348,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
{
struct batch_cache_req *req;
struct rpmh_request *rpm_msgs;
- DECLARE_COMPLETION_ONSTACK(compl);
+ struct completion *compls;
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
unsigned long time_left;
int count = 0;
- int ret, i, j;
+ int ret, i;
+ void *ptr;
if (!cmd || !n)
return -EINVAL;
@@ -362,10 +363,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
if (!count)
return -EINVAL;
- req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+ ptr = kzalloc(sizeof(*req) +
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
GFP_ATOMIC);
- if (!req)
+ if (!ptr)
return -ENOMEM;
+
+ req = ptr;
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
req->count = count;
rpm_msgs = req->rpm_msgs;
@@ -380,25 +386,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
for (i = 0; i < count; i++) {
- rpm_msgs[i].completion = &compl;
+ struct completion *compl = &compls[i];
+
+ init_completion(compl);
+ rpm_msgs[i].completion = compl;
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
if (ret) {
pr_err("Error(%d) sending RPMH message addr=%#x\n",
ret, rpm_msgs[i].msg.cmds[0].addr);
- for (j = i; j < count; j++)
- rpmh_tx_done(&rpm_msgs[j].msg, ret);
break;
}
}
time_left = RPMH_TIMEOUT_MS;
- for (i = 0; i < count; i++) {
- time_left = wait_for_completion_timeout(&compl, time_left);
+ while (i--) {
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
if (!time_left) {
/*
* Better hope they never finish because they'll signal
- * the completion on our stack and that's bad once
- * we've returned from the function.
+ * the completion that we're going to free once
+ * we've returned from this function.
*/
WARN_ON(1);
ret = -ETIMEDOUT;
@@ -407,7 +414,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
exit:
- kfree(req);
+ kfree(ptr);
return ret;
}
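The batch fix above replaces the single on-stack completion with per-message completions that live in the same allocation as the request bookkeeping. A reduced sketch of that layout, with hypothetical names (example_msg, example_batch) standing in for the driver's internal structures:

#include <linux/completion.h>
#include <linux/slab.h>

/* Hypothetical per-message payload standing in for struct rpmh_request. */
struct example_msg {
	int err;
	struct completion *completion;
};

struct example_batch {
	unsigned int count;
	struct example_msg msgs[];	/* flexible array, like req->rpm_msgs */
};

/*
 * Illustrative sketch of the single-allocation trick used above: the
 * header, `count` message slots and `count` completions share one
 * kzalloc(), so a late rpmh_tx_done() can never signal a completion
 * that lived on a caller's stack and has already gone away.
 */
static struct example_batch *example_batch_alloc(unsigned int count,
						 struct completion **compls)
{
	struct example_batch *req;
	void *ptr;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->msgs[0]) + sizeof(**compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return NULL;

	req = ptr;
	/* The completions start right after the flexible msgs[] array. */
	*compls = ptr + sizeof(*req) + count * sizeof(req->msgs[0]);

	return req;
}

Because the completions sit in the same allocation as the messages, they remain valid until the final kfree(ptr) in the exit path, even if a transfer completes after rpmh_write_batch() has timed out and returned.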
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
new file mode 100644
index 000000000000..5741ec3fa814
--- /dev/null
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
+
+#define RPMH_ARC_MAX_LEVELS 16
+
+/**
+ * struct rpmhpd - top level RPMh power domain resource data structure
+ * @dev: rpmh power domain controller device
+ * @pd:		generic_pm_domain corresponding to the power domain
+ * @peer: A peer power domain in case Active only Voting is
+ * supported
+ * @active_only: True if it represents an Active only peer
+ * @level: An array of level (vlvl) to corner (hlvl) mappings
+ * derived from cmd-db
+ * @level_count: Number of levels supported by the power domain. max
+ * being 16 (0 - 15)
+ * @enabled: true if the power domain is enabled
+ * @res_name: Resource name used for cmd-db lookup
+ * @addr:		Resource address as looked up using resource name from
+ * cmd-db
+ */
+struct rpmhpd {
+ struct device *dev;
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct rpmhpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ unsigned int active_corner;
+ u32 level[RPMH_ARC_MAX_LEVELS];
+ size_t level_count;
+ bool enabled;
+ const char *res_name;
+ u32 addr;
+};
+
+struct rpmhpd_desc {
+ struct rpmhpd **rpmhpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmhpd_lock);
+
+/* SDM845 RPMH powerdomains */
+
+static struct rpmhpd sdm845_ebi = {
+ .pd = { .name = "ebi", },
+ .res_name = "ebi.lvl",
+};
+
+static struct rpmhpd sdm845_lmx = {
+ .pd = { .name = "lmx", },
+ .res_name = "lmx.lvl",
+};
+
+static struct rpmhpd sdm845_lcx = {
+ .pd = { .name = "lcx", },
+ .res_name = "lcx.lvl",
+};
+
+static struct rpmhpd sdm845_gfx = {
+ .pd = { .name = "gfx", },
+ .res_name = "gfx.lvl",
+};
+
+static struct rpmhpd sdm845_mss = {
+ .pd = { .name = "mss", },
+ .res_name = "mss.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao;
+static struct rpmhpd sdm845_mx = {
+ .pd = { .name = "mx", },
+ .peer = &sdm845_mx_ao,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &sdm845_mx,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao;
+static struct rpmhpd sdm845_cx = {
+ .pd = { .name = "cx", },
+ .peer = &sdm845_cx_ao,
+ .parent = &sdm845_mx.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &sdm845_cx,
+ .parent = &sdm845_mx_ao.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd *sdm845_rpmhpds[] = {
+ [SDM845_EBI] = &sdm845_ebi,
+ [SDM845_MX] = &sdm845_mx,
+ [SDM845_MX_AO] = &sdm845_mx_ao,
+ [SDM845_CX] = &sdm845_cx,
+ [SDM845_CX_AO] = &sdm845_cx_ao,
+ [SDM845_LMX] = &sdm845_lmx,
+ [SDM845_LCX] = &sdm845_lcx,
+ [SDM845_GFX] = &sdm845_gfx,
+ [SDM845_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sdm845_desc = {
+ .rpmhpds = sdm845_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdm845_rpmhpds),
+};
+
+static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { }
+};
+
+static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
+ unsigned int corner, bool sync)
+{
+ struct tcs_cmd cmd = {
+ .addr = pd->addr,
+ .data = corner,
+ };
+
+ /*
+ * Wait for an ack only when we are increasing the
+ * perf state of the power domain
+ */
+ if (sync)
+ return rpmh_write(pd->dev, state, &cmd, 1);
+ else
+ return rpmh_write_async(pd->dev, state, &cmd, 1);
+}
+
+static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+/*
+ * This function is used to aggregate the votes across the active only
+ * resources and its peers. The aggregated votes are sent to RPMh as
+ * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
+ * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
+ * on system sleep).
+ * We send ACTIVE_ONLY votes for resources without any peers. For others,
+ * which have an active only peer, all 3 votes are sent.
+ */
+static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+{
+ int ret;
+ struct rpmhpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
+ active_corner > pd->active_corner);
+ if (ret)
+ return ret;
+
+ pd->active_corner = active_corner;
+
+ if (peer) {
+ peer->active_corner = active_corner;
+
+ ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
+ active_corner, false);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
+ false);
+ }
+
+ return ret;
+}
+
+static int rpmhpd_power_on(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ if (pd->corner)
+ ret = rpmhpd_aggregate_corner(pd, pd->corner);
+
+ if (!ret)
+ pd->enabled = true;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_power_off(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
+
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int level)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0, i;
+
+ mutex_lock(&rpmhpd_lock);
+
+ for (i = 0; i < pd->level_count; i++)
+ if (level <= pd->level[i])
+ break;
+
+ /*
+ * If the level requested is more than that supported by the
+ * max corner, just set it to max anyway.
+ */
+ if (i == pd->level_count)
+ i--;
+
+ if (pd->enabled) {
+ ret = rpmhpd_aggregate_corner(pd, i);
+ if (ret)
+ goto out;
+ }
+
+ pd->corner = i;
+out:
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+{
+ int i;
+ const u16 *buf;
+
+ buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* 2 bytes used for each command DB aux data entry */
+ rpmhpd->level_count >>= 1;
+
+ if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < rpmhpd->level_count; i++) {
+ rpmhpd->level[i] = buf[i];
+
+ /*
+ * The AUX data may be zero padded. These 0 valued entries at
+ * the end of the map must be ignored.
+ */
+ if (i > 0 && rpmhpd->level[i] == 0) {
+ rpmhpd->level_count = i;
+ break;
+ }
+ pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
+ rpmhpd->level[i]);
+ }
+
+ return 0;
+}
+
+static int rpmhpd_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ size_t num_pds;
+ struct device *dev = &pdev->dev;
+ struct genpd_onecell_data *data;
+ struct rpmhpd **rpmhpds;
+ const struct rpmhpd_desc *desc;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmhpds = desc->rpmhpds;
+ num_pds = desc->num_pds;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num_pds;
+
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i]) {
+ dev_warn(dev, "rpmhpds[%d] is empty\n", i);
+ continue;
+ }
+
+ rpmhpds[i]->dev = dev;
+ rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
+ if (!rpmhpds[i]->addr) {
+ dev_err(dev, "Could not find RPMh address for resource %s\n",
+ rpmhpds[i]->res_name);
+ return -ENODEV;
+ }
+
+ ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
+ if (ret != CMD_DB_HW_ARC) {
+ dev_err(dev, "RPMh slave ID mismatch\n");
+ return -EINVAL;
+ }
+
+ ret = rpmhpd_update_level_mapping(rpmhpds[i]);
+ if (ret)
+ return ret;
+
+ rpmhpds[i]->pd.power_off = rpmhpd_power_off;
+ rpmhpds[i]->pd.power_on = rpmhpd_power_on;
+ rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
+ rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
+ pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmhpds[i]->pd;
+ }
+
+ /* Add subdomains */
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i])
+ continue;
+ if (rpmhpds[i]->parent)
+ pm_genpd_add_subdomain(rpmhpds[i]->parent,
+ &rpmhpds[i]->pd);
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmhpd_driver = {
+ .driver = {
+ .name = "qcom-rpmhpd",
+ .of_match_table = rpmhpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmhpd_probe,
+};
+
+static int __init rpmhpd_init(void)
+{
+ return platform_driver_register(&rpmhpd_driver);
+}
+core_initcall(rpmhpd_init);
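The aggregation rule implemented by rpmhpd_aggregate_corner() and to_active_sleep() above can be illustrated in isolation; the example_vote structure and helper names below are hypothetical and only restate the arithmetic:

#include <linux/kernel.h>
#include <linux/types.h>

/*
 * Illustrative only: an active-only domain contributes nothing to the
 * sleep set, and each vote sent to RPMh is the max of a domain's own
 * request and its peer's.
 */
struct example_vote {
	unsigned int corner;
	bool active_only;
	bool enabled;
};

static unsigned int example_active_vote(const struct example_vote *pd,
					const struct example_vote *peer)
{
	unsigned int peer_corner = (peer && peer->enabled) ? peer->corner : 0;

	return max(pd->corner, peer_corner);
}

static unsigned int example_sleep_vote(const struct example_vote *pd,
				       const struct example_vote *peer)
{
	unsigned int this_sleep = pd->active_only ? 0 : pd->corner;
	unsigned int peer_sleep = 0;

	if (peer && peer->enabled && !peer->active_only)
		peer_sleep = peer->corner;

	return max(this_sleep, peer_sleep);
}

For instance, with cx enabled at corner 5 and its active-only peer cx_ao enabled at corner 3, the ACTIVE_ONLY vote is max(5, 3) = 5, while cx_ao contributes nothing to the SLEEP vote, which stays at 5 from cx alone.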
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
new file mode 100644
index 000000000000..005326050c23
--- /dev/null
+++ b/drivers/soc/qcom/rpmpd.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
+
+/* Resource types */
+#define RPMPD_SMPA 0x61706d73
+#define RPMPD_LDOA 0x616f646c
+
+/* Operation Keys */
+#define KEY_CORNER 0x6e726f63 /* corn */
+#define KEY_ENABLE 0x6e657773 /* swen */
+#define KEY_FLOOR_CORNER 0x636676 /* vfc */
+
+#define MAX_RPMPD_STATE 6
+
+#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \
+ static struct rpmpd _platform##_##_active; \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .peer = &_platform##_##_active, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }; \
+ static struct rpmpd _platform##_##_active = { \
+ .pd = { .name = #_active, }, \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_LDOA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA)
+
+#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA)
+
+struct rpmpd_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpmpd {
+ struct generic_pm_domain pd;
+ struct rpmpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ bool enabled;
+ const char *res_name;
+ const int res_type;
+ const int res_id;
+ struct qcom_smd_rpm *rpm;
+ __le32 key;
+};
+
+struct rpmpd_desc {
+ struct rpmpd **rpmpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmpd_lock);
+
+/* msm8996 RPM Power domains */
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1);
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2);
+DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26);
+
+DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1);
+DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26);
+
+static struct rpmpd *msm8996_rpmpds[] = {
+ [MSM8996_VDDCX] = &msm8996_vddcx,
+ [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao,
+ [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc,
+ [MSM8996_VDDMX] = &msm8996_vddmx,
+ [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao,
+ [MSM8996_VDDSSCX] = &msm8996_vddsscx,
+ [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc,
+};
+
+static const struct rpmpd_desc msm8996_desc = {
+ .rpmpds = msm8996_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8996_rpmpds),
+};
+
+static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+ { }
+};
+
+static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
+{
+ struct rpmpd_req req = {
+ .key = KEY_ENABLE,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(enable),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ pd->res_type, pd->res_id, &req, sizeof(req));
+}
+
+static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
+{
+ struct rpmpd_req req = {
+ .key = pd->key,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(corner),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
+ &req, sizeof(req));
+};
+
+static void to_active_sleep(struct rpmpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int rpmpd_aggregate_corner(struct rpmpd *pd)
+{
+ int ret;
+ struct rpmpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner);
+}
+
+static int rpmpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, true);
+ if (ret)
+ goto out;
+
+ pd->enabled = true;
+
+ if (pd->corner)
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, false);
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_set_performance(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ int ret = 0;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ if (state > MAX_RPMPD_STATE)
+ goto out;
+
+ mutex_lock(&rpmpd_lock);
+
+ pd->corner = state;
+
+ if (!pd->enabled && pd->key != KEY_FLOOR_CORNER)
+ goto out;
+
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmpd_probe(struct platform_device *pdev)
+{
+ int i;
+ size_t num;
+ struct genpd_onecell_data *data;
+ struct qcom_smd_rpm *rpm;
+ struct rpmpd **rpmpds;
+ const struct rpmpd_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmpds = desc->rpmpds;
+ num = desc->num_pds;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ data->num_domains = num;
+
+ for (i = 0; i < num; i++) {
+ if (!rpmpds[i]) {
+ dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n",
+ i);
+ continue;
+ }
+
+ rpmpds[i]->rpm = rpm;
+ rpmpds[i]->pd.power_off = rpmpd_power_off;
+ rpmpds[i]->pd.power_on = rpmpd_power_on;
+ rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+ rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance;
+ pm_genpd_init(&rpmpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmpds[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmpd_driver = {
+ .driver = {
+ .name = "qcom-rpmpd",
+ .of_match_table = rpmpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmpd_probe,
+};
+
+static int __init rpmpd_init(void)
+{
+ return platform_driver_register(&rpmpd_driver);
+}
+core_initcall(rpmpd_init);
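The magic numbers used for the resource types and keys above are little-endian ASCII tags, as the partial comments ("corn", "swen", "vfc") hint. A small standalone check, written as plain userspace C rather than driver code, that decodes them:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: 0x61706d73 is "smpa", 0x616f646c is "ldoa",
 * 0x6e726f63 is "corn", 0x6e657773 is "swen" and 0x636676 is "vfc",
 * read byte by byte from the least significant end.
 */
static void print_tag(uint32_t tag)
{
	char buf[5] = { 0 };
	int i;

	for (i = 0; i < 4 && (tag >> (8 * i)); i++)
		buf[i] = (tag >> (8 * i)) & 0xff;

	printf("0x%08x -> \"%s\"\n", tag, buf);
}

int main(void)
{
	print_tag(0x61706d73);	/* RPMPD_SMPA */
	print_tag(0x616f646c);	/* RPMPD_LDOA */
	print_tag(0x6e726f63);	/* KEY_CORNER */
	print_tag(0x6e657773);	/* KEY_ENABLE */
	print_tag(0x636676);	/* KEY_FLOOR_CORNER */
	return 0;
}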
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index b8e63724a49d..9956bb2c63f2 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
+ { .compatible = "qcom,rpm-sdm660" },
{ .compatible = "qcom,rpm-qcs404" },
{}
};