Diffstat (limited to 'drivers/interconnect/qcom/icc-rpm.c')
-rw-r--r--  drivers/interconnect/qcom/icc-rpm.c  220
1 file changed, 113 insertions(+), 107 deletions(-)
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 6acc7686ed38..3209d8de709b 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -3,7 +3,6 @@
* Copyright (C) 2020 Linaro Ltd
*/
-#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
@@ -14,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"
@@ -50,6 +48,8 @@
#define NOC_QOS_MODE_FIXED_VAL 0x0
#define NOC_QOS_MODE_BYPASS_VAL 0x2
+#define ICC_BUS_CLK_MIN_RATE 19200ULL /* kHz */
+
static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
struct icc_provider *provider = src->provider;
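An aside on the new ICC_BUS_CLK_MIN_RATE: the value is in kHz, like every other bus-clock figure in this file, so the floor works out to 19.2 MHz, presumably the XO rate these SoCs idle their buses at. A trivial sanity check, illustrative only and not part of the patch:

    /* ICC_BUS_CLK_MIN_RATE is expressed in kHz, as the comment next to it notes */
    u64 min_khz = ICC_BUS_CLK_MIN_RATE;     /* 19200 kHz */
    u64 min_hz  = 1000ULL * min_khz;        /* 19200000 Hz == 19.2 MHz */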
@@ -204,34 +204,39 @@ static int qcom_icc_qos_set(struct icc_node *node)
}
}
-static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 sum_bw)
+static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw)
{
- int ret = 0;
+ int ret, rpm_ctx = 0;
+ u64 bw_bps;
if (qn->qos.ap_owned)
return 0;
- if (qn->mas_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_MASTER_REQ,
- qn->mas_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- qn->mas_rpm_id, ret);
- return ret;
+ for (rpm_ctx = 0; rpm_ctx < QCOM_SMD_RPM_STATE_NUM; rpm_ctx++) {
+ bw_bps = icc_units_to_bps(bw[rpm_ctx]);
+
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(rpm_ctx,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ bw_bps);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
}
- }
- if (qn->slv_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_SLAVE_REQ,
- qn->slv_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
- qn->slv_rpm_id, ret);
- return ret;
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(rpm_ctx,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ bw_bps);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
+ qn->slv_rpm_id, ret);
+ return ret;
+ }
}
}
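For context, the rewritten qcom_icc_rpm_set() above now casts one master and one slave vote per RPM state, using the per-bucket values aggregated earlier; icc_units_to_bps() simply scales the framework's kB/s units to bytes per second before the request is handed to RPM. A minimal sketch of that conversion with made-up numbers:

    /* Sketch: the icc framework aggregates bandwidth in kB/s (icc units),
     * while RPM bus master/slave requests are expressed in bytes per second.
     */
    u64 bw[QCOM_SMD_RPM_STATE_NUM];

    bw[QCOM_SMD_RPM_ACTIVE_STATE] = 800000;  /* hypothetical kB/s vote */
    bw[QCOM_SMD_RPM_SLEEP_STATE]  = 76800;   /* hypothetical kB/s vote */

    u64 active_bps = icc_units_to_bps(bw[QCOM_SMD_RPM_ACTIVE_STATE]); /* 800000000 B/s */
    u64 sleep_bps  = icc_units_to_bps(bw[QCOM_SMD_RPM_SLEEP_STATE]);  /*  76800000 B/s */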
@@ -248,7 +253,7 @@ static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
size_t i;
qn = node->data;
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
@@ -272,9 +277,9 @@ static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
qn = node->data;
if (!tag)
- tag = QCOM_ICC_TAG_ALWAYS;
+ tag = RPM_ALWAYS_TAG;
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
if (tag & BIT(i)) {
qn->sum_avg[i] += avg_bw;
qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
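Note that the loop above keys the aggregation purely off the tag's per-state bits: a request tagged for both states grows both buckets, while an active-only tag leaves the sleep bucket alone (RPM_ALWAYS_TAG, the untagged default, is assumed here to cover every state bit). A small sketch of that selection:

    /* Sketch: per-bucket accumulation driven by tag bits (hypothetical request) */
    u64 sum_avg[QCOM_SMD_RPM_STATE_NUM] = { 0 };
    u32 tag = BIT(QCOM_SMD_RPM_ACTIVE_STATE);   /* active-only request */
    u32 avg_bw = 100000;                        /* kB/s */
    int i;

    for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++)
            if (tag & BIT(i))
                    sum_avg[i] += avg_bw;       /* only the active bucket is bumped */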
@@ -287,61 +292,45 @@ static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
}
/**
- * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
+ * qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
* @provider: generic interconnect provider
- * @agg_avg: an array for aggregated average bandwidth of buckets
- * @agg_peak: an array for aggregated peak bandwidth of buckets
- * @max_agg_avg: pointer to max value of aggregated average bandwidth
+ * @agg_clk_rate: array containing the aggregated clock rates in kHz
*/
-static void qcom_icc_bus_aggregate(struct icc_provider *provider,
- u64 *agg_avg, u64 *agg_peak,
- u64 *max_agg_avg)
+static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
{
- struct icc_node *node;
+ u64 agg_avg_rate, agg_rate;
struct qcom_icc_node *qn;
- u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
+ struct icc_node *node;
int i;
- /* Initialise aggregate values */
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
- agg_avg[i] = 0;
- agg_peak[i] = 0;
- }
-
- *max_agg_avg = 0;
-
/*
- * Iterate nodes on the interconnect and aggregate bandwidth
- * requests for every bucket.
+ * Iterate nodes on the provider, aggregate bandwidth requests for
+ * every bucket and convert them into bus clock rates.
*/
list_for_each_entry(node, &provider->nodes, node_list) {
qn = node->data;
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
if (qn->channels)
- sum_avg[i] = div_u64(qn->sum_avg[i], qn->channels);
+ agg_avg_rate = div_u64(qn->sum_avg[i], qn->channels);
else
- sum_avg[i] = qn->sum_avg[i];
- agg_avg[i] += sum_avg[i];
- agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
+ agg_avg_rate = qn->sum_avg[i];
+
+ agg_rate = max_t(u64, agg_avg_rate, qn->max_peak[i]);
+ do_div(agg_rate, qn->buswidth);
+
+ agg_clk_rate[i] = max_t(u64, agg_clk_rate[i], agg_rate);
}
}
-
- /* Find maximum values across all buckets */
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
- *max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
}
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
- struct qcom_icc_provider *qp;
struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
+ u64 agg_clk_rate[QCOM_SMD_RPM_STATE_NUM] = { 0 };
struct icc_provider *provider;
- u64 sum_bw;
- u64 rate;
- u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
- u64 max_agg_avg;
- int ret, i;
- int bucket;
+ struct qcom_icc_provider *qp;
+ u64 active_rate, sleep_rate;
+ int ret;
src_qn = src->data;
if (dst)
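A note on units for the aggregation above: the buckets hold kB/s and the provider wants kHz, and since both carry the same "kilo" prefix, dividing the larger of average and peak by the node's bus width in bytes is the whole conversion. A worked example with assumed node parameters:

    /* Sketch: one bucket of a 2-channel, 16-byte-wide node (hypothetical values) */
    u64 sum_avg  = 7464000;                 /* kB/s, summed over all requests */
    u64 max_peak = 3000000;                 /* kB/s */
    u16 channels = 2, buswidth = 16;

    u64 avg  = div_u64(sum_avg, channels);  /* 3732000 kB/s per channel */
    u64 rate = max_t(u64, avg, max_peak);   /* 3732000 kB/s */
    do_div(rate, buswidth);                 /* 233250 kHz, i.e. 233.25 MHz */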
@@ -349,56 +338,66 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
provider = src->provider;
qp = to_qcom_provider(provider);
- qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);
-
- sum_bw = icc_units_to_bps(max_agg_avg);
+ qcom_icc_bus_aggregate(provider, agg_clk_rate);
+ active_rate = agg_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE];
+ sleep_rate = agg_clk_rate[QCOM_SMD_RPM_SLEEP_STATE];
- ret = qcom_icc_rpm_set(src_qn, sum_bw);
+ ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg);
if (ret)
return ret;
if (dst_qn) {
- ret = qcom_icc_rpm_set(dst_qn, sum_bw);
+ ret = qcom_icc_rpm_set(dst_qn, dst_qn->sum_avg);
+ if (ret)
+ return ret;
+ }
+
+ /* Some providers don't have a bus clock to scale */
+ if (!qp->bus_clk_desc && !qp->bus_clk)
+ return 0;
+
+ /*
+ * Downstream checks whether the requested rate is zero, but it makes little sense
+ * to vote for a value that's below the lower threshold, so let's not do so.
+ */
+ if (qp->keep_alive)
+ active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);
+
+ /* Some providers have a non-RPM-owned bus clock - convert kHz->Hz for the CCF */
+ if (qp->bus_clk) {
+ active_rate = max_t(u64, active_rate, sleep_rate);
+ /* ARM32 caps clk_set_rate arg to u32.. Nothing we can do about that! */
+ active_rate = min_t(u64, 1000ULL * active_rate, ULONG_MAX);
+ return clk_set_rate(qp->bus_clk, active_rate);
+ }
+
+ /* RPM only accepts <=INT_MAX rates */
+ active_rate = min_t(u64, active_rate, INT_MAX);
+ sleep_rate = min_t(u64, sleep_rate, INT_MAX);
+
+ if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
+ ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
+ active_rate);
if (ret)
return ret;
+
+ /* Cache the rate after we've successfully committed it to RPM */
+ qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
}
- for (i = 0; i < qp->num_bus_clks; i++) {
- /*
- * Use WAKE bucket for active clock, otherwise, use SLEEP bucket
- * for other clocks. If a platform doesn't set interconnect
- * path tags, by default use sleep bucket for all clocks.
- *
- * Note, AMC bucket is not supported yet.
- */
- if (!strcmp(qp->bus_clks[i].id, "bus_a"))
- bucket = QCOM_ICC_BUCKET_WAKE;
- else
- bucket = QCOM_ICC_BUCKET_SLEEP;
-
- rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
- do_div(rate, src_qn->buswidth);
- rate = min_t(u64, rate, LONG_MAX);
-
- if (qp->bus_clk_rate[i] == rate)
- continue;
-
- ret = clk_set_rate(qp->bus_clks[i].clk, rate);
- if (ret) {
- pr_err("%s clk_set_rate error: %d\n",
- qp->bus_clks[i].id, ret);
+ if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
+ ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
+ sleep_rate);
+ if (ret)
return ret;
- }
- qp->bus_clk_rate[i] = rate;
+
+ /* Cache the rate after we've successfully committed it to RPM */
+ qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
}
return 0;
}
-static const char * const bus_clocks[] = {
- "bus", "bus_a",
-};
-
int qnoc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
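To recap the clamping in the new qcom_icc_set() with concrete (assumed) numbers: keep-alive providers are floored at 19200 kHz, the CCF path converts kHz to Hz and caps at ULONG_MAX because clk_set_rate() takes an unsigned long (32 bits on ARM32), and RPM votes are capped at INT_MAX:

    /* Sketch: a 4.8 GHz aggregate on a keep-alive provider (hypothetical) */
    u64 active_rate = 4800000;                              /* kHz */

    active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);   /* still 4800000 kHz */

    /* CCF path: 4800000000 Hz overflows a 32-bit unsigned long, so ARM32 caps here */
    unsigned long ccf_hz = min_t(u64, 1000ULL * active_rate, ULONG_MAX);

    /* RPM path: 4800000 kHz fits comfortably below INT_MAX */
    u32 rpm_khz = min_t(u64, active_rate, INT_MAX);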
@@ -440,6 +439,20 @@ int qnoc_probe(struct platform_device *pdev)
if (!qp->intf_clks)
return -ENOMEM;
+ if (desc->bus_clk_desc) {
+ qp->bus_clk_desc = devm_kzalloc(dev, sizeof(*qp->bus_clk_desc),
+ GFP_KERNEL);
+ if (!qp->bus_clk_desc)
+ return -ENOMEM;
+
+ qp->bus_clk_desc = desc->bus_clk_desc;
+ } else {
+ /* Some older SoCs may have a single non-RPM-owned bus clock. */
+ qp->bus_clk = devm_clk_get_optional(dev, "bus");
+ if (IS_ERR(qp->bus_clk))
+ return PTR_ERR(qp->bus_clk);
+ }
+
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
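Note the "optional" lookup above: devm_clk_get_optional() returns NULL rather than an error when the DT node simply has no "bus" clock, while real failures such as -EPROBE_DEFER still come back as error pointers. Each provider therefore ends up with at most one scaling handle, and the new qcom_icc_set() dispatches on whichever is populated, roughly as in this simplified sketch:

    /* Sketch of the dispatch in qcom_icc_set() (simplified) */
    if (qp->bus_clk_desc) {
            /* vote the rate to RPM via qcom_icc_rpm_set_bus_rate() */
    } else if (qp->bus_clk) {
            /* hand the rate to the clock framework via clk_set_rate() */
    } else {
            /* nothing to scale: bail out early */
    }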
@@ -449,10 +462,7 @@ int qnoc_probe(struct platform_device *pdev)
for (i = 0; i < cd_num; i++)
qp->intf_clks[i].id = cds[i];
- qp->num_bus_clks = desc->no_clk_scaling ? 0 : NUM_BUS_CLKS;
- for (i = 0; i < qp->num_bus_clks; i++)
- qp->bus_clks[i].id = bus_clocks[i];
-
+ qp->keep_alive = desc->keep_alive;
qp->type = desc->type;
qp->qos_offset = desc->qos_offset;
@@ -481,11 +491,7 @@ int qnoc_probe(struct platform_device *pdev)
}
regmap_done:
- ret = devm_clk_bulk_get(dev, qp->num_bus_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- ret = clk_bulk_prepare_enable(qp->num_bus_clks, qp->bus_clks);
+ ret = clk_prepare_enable(qp->bus_clk);
if (ret)
return ret;
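This works even for providers that only have an RPM-owned bus clock: qp->bus_clk is NULL there, and the common clock framework treats a NULL clk as a harmless no-op that returns success. A minimal illustration:

    /* Sketch: CCF calls on a NULL clk pointer succeed without doing anything */
    struct clk *clk = NULL;

    WARN_ON(clk_prepare_enable(clk) != 0);   /* returns 0 */
    WARN_ON(clk_set_rate(clk, 19200000));    /* returns 0 */
    clk_disable_unprepare(clk);              /* does nothing */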
@@ -557,7 +563,7 @@ err_deregister_provider:
icc_provider_deregister(provider);
err_remove_nodes:
icc_nodes_remove(provider);
- clk_bulk_disable_unprepare(qp->num_bus_clks, qp->bus_clks);
+ clk_disable_unprepare(qp->bus_clk);
return ret;
}
@@ -569,7 +575,7 @@ int qnoc_remove(struct platform_device *pdev)
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
- clk_bulk_disable_unprepare(qp->num_bus_clks, qp->bus_clks);
+ clk_disable_unprepare(qp->bus_clk);
return 0;
}