Diffstat (limited to 'drivers/firmware/arm_scmi/perf.c')
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 1171
1 file changed, 999 insertions(+), 172 deletions(-)
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 3c8ae7cc35de..683fd9b85c5c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -2,15 +2,32 @@
/*
* System Control and Management Interface (SCMI) Performance Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2023 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
+
+#include <linux/bits.h>
+#include <linux/hashtable.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/scmi_protocol.h>
#include <linux/sort.h>
+#include <linux/xarray.h>
+
+#include <trace/events/scmi.h>
+
+#include "protocols.h"
+#include "notify.h"
-#include "common.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x40000
+
+#define MAX_OPPS 32
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
@@ -21,18 +38,30 @@ enum scmi_performance_protocol_cmd {
PERF_LEVEL_GET = 0x8,
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
+ PERF_DESCRIBE_FASTCHANNEL = 0xb,
+ PERF_DOMAIN_NAME_GET = 0xc,
+};
+
+enum {
+ PERF_FC_LEVEL,
+ PERF_FC_LIMIT,
+ PERF_FC_MAX,
};
struct scmi_opp {
u32 perf;
u32 power;
u32 trans_latency_us;
+ u32 indicative_freq;
+ u32 level_index;
+ struct hlist_node hash;
};
struct scmi_msg_resp_perf_attributes {
__le16 num_domains;
__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
+#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1))
__le32 stats_addr_low;
__le32 stats_addr_high;
__le32 stats_size;
@@ -44,10 +73,13 @@ struct scmi_msg_resp_perf_domain_attributes {
#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
+#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26))
+#define SUPPORTS_LEVEL_INDEXING(x) ((x) & BIT(25))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_perf_describe_levels {
@@ -76,6 +108,19 @@ struct scmi_perf_notify_level_or_limits {
__le32 notify_enable;
};
+struct scmi_perf_limits_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 range_max;
+ __le32 range_min;
+};
+
+struct scmi_perf_level_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 performance_level;
+};
+
struct scmi_msg_resp_perf_describe_levels {
__le16 num_returned;
__le16 num_remaining;
@@ -84,100 +129,212 @@ struct scmi_msg_resp_perf_describe_levels {
__le32 power;
__le16 transition_latency_us;
__le16 reserved;
- } opp[0];
+ } opp[];
+};
+
+struct scmi_msg_resp_perf_describe_levels_v4 {
+ __le16 num_returned;
+ __le16 num_remaining;
+ struct {
+ __le32 perf_val;
+ __le32 power;
+ __le16 transition_latency_us;
+ __le16 reserved;
+ __le32 indicative_freq;
+ __le32 level_index;
+ } opp[];
};
struct perf_dom_info {
+ u32 id;
bool set_limits;
- bool set_perf;
bool perf_limit_notify;
bool perf_level_notify;
+ bool perf_fastchannels;
+ bool level_indexing_mode;
u32 opp_count;
+ u32 rate_limit_us;
u32 sustained_freq_khz;
u32 sustained_perf_level;
- u32 mult_factor;
- char name[SCMI_MAX_STR_SIZE];
+ unsigned long mult_factor;
+ struct scmi_perf_domain_info info;
struct scmi_opp opp[MAX_OPPS];
+ struct scmi_fc_info *fc_info;
+ struct xarray opps_by_idx;
+ struct xarray opps_by_lvl;
+ DECLARE_HASHTABLE(opps_by_freq, ilog2(MAX_OPPS));
};
+#define LOOKUP_BY_FREQ(__htp, __freq) \
+({ \
+ /* u32 cast is needed to pick right hash func */ \
+ u32 f_ = (u32)(__freq); \
+ struct scmi_opp *_opp; \
+ \
+ hash_for_each_possible((__htp), _opp, hash, f_) \
+ if (_opp->indicative_freq == f_) \
+ break; \
+ _opp; \
+})
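A short usage sketch of the lookup helper above; it is illustrative only, mirroring how scmi_dvfs_freq_set() later in this patch resolves a frequency to a level index (dom is assumed to be a struct perf_dom_info * whose opps_by_freq table has already been populated):

	struct scmi_opp *opp;
	u32 level;

	opp = LOOKUP_BY_FREQ(dom->opps_by_freq, freq / dom->mult_factor);
	if (!opp)
		return -EIO;

	level = opp->level_index;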
+
struct scmi_perf_info {
- int num_domains;
- bool power_scale_mw;
+ u32 version;
+ u16 num_domains;
+ enum scmi_power_scale power_scale;
u64 stats_addr;
u32 stats_size;
+ bool notify_lvl_cmd;
+ bool notify_lim_cmd;
struct perf_dom_info *dom_info;
};
-static int scmi_perf_attributes_get(const struct scmi_handle *handle,
+static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
+ PERF_NOTIFY_LIMITS,
+ PERF_NOTIFY_LEVEL,
+};
+
+static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u16 flags = le16_to_cpu(attr->flags);
pi->num_domains = le16_to_cpu(attr->num_domains);
- pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+
+ if (POWER_SCALE_IN_MILLIWATT(flags))
+ pi->power_scale = SCMI_POWER_MILLIWATTS;
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
+ if (POWER_SCALE_IN_MICROWATT(flags))
+ pi->power_scale = SCMI_POWER_MICROWATTS;
+
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL))
+ pi->notify_lvl_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL))
+ pi->notify_lim_cmd = true;
+ }
+
return ret;
}
+static void scmi_perf_xa_destroy(void *data)
+{
+ int domain;
+ struct scmi_perf_info *pinfo = data;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx));
+ xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl));
+ }
+}
+
static int
-scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
- struct perf_dom_info *dom_info)
+scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom_info,
+ bool notify_lim_cmd, bool notify_lvl_cmd,
+ u32 version)
{
int ret;
+ u32 flags;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
- ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_PERF, sizeof(domain),
- sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
+ sizeof(dom_info->id), sizeof(*attr), &t);
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(dom_info->id, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- u32 flags = le32_to_cpu(attr->flags);
+ flags = le32_to_cpu(attr->flags);
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
- dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
- dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
- dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
+ if (notify_lim_cmd)
+ dom_info->perf_limit_notify =
+ SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+ if (notify_lvl_cmd)
+ dom_info->perf_level_notify =
+ SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
+ if (PROTOCOL_REV_MAJOR(version) >= 0x4)
+ dom_info->level_indexing_mode =
+ SUPPORTS_LEVEL_INDEXING(flags);
+ dom_info->rate_limit_us = le32_to_cpu(attr->rate_limit_us) &
+ GENMASK(19, 0);
dom_info->sustained_freq_khz =
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
+ /*
+ * sustained_freq_khz = mult_factor * sustained_perf_level
+ * mult_factor must be a non-zero positive integer (not a fraction)
+ */
if (!dom_info->sustained_freq_khz ||
- !dom_info->sustained_perf_level)
+ !dom_info->sustained_perf_level ||
+ dom_info->level_indexing_mode) {
/* CPUFreq converts to kHz, hence default 1000 */
dom_info->mult_factor = 1000;
- else
+ } else {
dom_info->mult_factor =
- (dom_info->sustained_freq_khz * 1000) /
- dom_info->sustained_perf_level;
- strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ (dom_info->sustained_freq_khz * 1000UL)
+ / dom_info->sustained_perf_level;
+ if ((dom_info->sustained_freq_khz * 1000UL) %
+ dom_info->sustained_perf_level)
+ dev_warn(ph->dev,
+ "multiplier for domain %d rounded\n",
+ dom_info->id);
+ }
+ if (!dom_info->mult_factor)
+ dev_warn(ph->dev,
+ "Wrong sustained perf/frequency(domain %d)\n",
+ dom_info->id);
+
+ strscpy(dom_info->info.name, attr->name,
+ SCMI_SHORT_NAME_MAX_SIZE);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
+ dom_info->id, NULL, dom_info->info.name,
+ SCMI_MAX_STR_SIZE);
+
+ xa_init(&dom_info->opps_by_lvl);
+ if (dom_info->level_indexing_mode) {
+ xa_init(&dom_info->opps_by_idx);
+ hash_init(dom_info->opps_by_freq);
}
- scmi_xfer_put(handle, t);
return ret;
}
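To make the multiplier arithmetic above concrete, a small worked example follows; the numbers are illustrative and not taken from any real platform:

	/* A domain whose sustained perf level of 2000 corresponds to a
	 * sustained frequency of 2 GHz, i.e. 2,000,000 kHz.
	 */
	u32 sustained_freq_khz = 2000000;
	u32 sustained_perf_level = 2000;

	/* mult_factor is expressed in Hz per unit of perf level:
	 * (2,000,000 * 1000) / 2000 = 1,000,000 Hz per level.
	 */
	unsigned long mult_factor =
		(sustained_freq_khz * 1000UL) / sustained_perf_level;

	/* An OPP at perf level 1500 then maps to 1.5 GHz. */
	unsigned long freq_hz = 1500 * mult_factor; /* 1,500,000,000 Hz */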
@@ -188,76 +345,202 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
return t1->perf - t2->perf;
}
+struct scmi_perf_ipriv {
+ u32 version;
+ struct perf_dom_info *perf_dom;
+};
+
+static void iter_perf_levels_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_perf_describe_levels *msg = message;
+ const struct scmi_perf_ipriv *p = priv;
+
+ msg->domain = cpu_to_le32(p->perf_dom->id);
+ /* Set the number of OPPs to be skipped/already read */
+ msg->level_index = cpu_to_le32(desc_index);
+}
+
+static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_msg_resp_perf_describe_levels *r = response;
+
+ st->num_returned = le16_to_cpu(r->num_returned);
+ st->num_remaining = le16_to_cpu(r->num_remaining);
+
+ return 0;
+}
+
+static inline int
+process_response_opp(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels *r)
+{
+ int ret;
+
+ opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
+ opp->power = le32_to_cpu(r->opp[loop_idx].power);
+ opp->trans_latency_us =
+ le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int
+process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels_v4 *r)
+{
+ int ret;
+
+ opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
+ opp->power = le32_to_cpu(r->opp[loop_idx].power);
+ opp->trans_latency_us =
+ le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
+ return ret;
+ }
+
+ /* Note that PERF v4 always reports five 32-bit words */
+ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
+ if (dom->level_indexing_mode) {
+ opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
+
+ ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+ GFP_KERNEL);
+ if (ret) {
+ dev_warn(dev,
+ "Failed to add opps_by_idx at %d for %s - ret:%d\n",
+ opp->level_index, dom->info.name, ret);
+
+ /* Cleanup by_lvl too */
+ xa_erase(&dom->opps_by_lvl, opp->perf);
+
+ return ret;
+ }
+
+ hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
+ }
+
+ return 0;
+}
+
static int
-scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
- struct perf_dom_info *perf_dom)
+iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
{
- int ret, cnt;
- u32 tot_opp_cnt = 0;
- u16 num_returned, num_remaining;
- struct scmi_xfer *t;
+ int ret;
struct scmi_opp *opp;
- struct scmi_msg_perf_describe_levels *dom_info;
- struct scmi_msg_resp_perf_describe_levels *level_info;
+ struct scmi_perf_ipriv *p = priv;
+
+ opp = &p->perf_dom->opp[p->perf_dom->opp_count];
+ if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
+ ret = process_response_opp(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
+ else
+ ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
- ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
- SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
+ /* Skip BAD duplicates received from firmware */
+ if (ret)
+ return ret == -EBUSY ? 0 : ret;
+
+ p->perf_dom->opp_count++;
+
+ dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
+ opp->perf, opp->power, opp->trans_latency_us,
+ opp->indicative_freq, opp->level_index);
+
+ return 0;
+}
+
+static int
+scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *perf_dom, u32 version)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_perf_levels_prepare_message,
+ .update_state = iter_perf_levels_update_state,
+ .process_response = iter_perf_levels_process_response,
+ };
+ struct scmi_perf_ipriv ppriv = {
+ .version = version,
+ .perf_dom = perf_dom,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
+ PERF_DESCRIBE_LEVELS,
+ sizeof(struct scmi_msg_perf_describe_levels),
+ &ppriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
if (ret)
return ret;
- dom_info = t->tx.buf;
- level_info = t->rx.buf;
+ if (perf_dom->opp_count)
+ sort(perf_dom->opp, perf_dom->opp_count,
+ sizeof(struct scmi_opp), opp_cmp_func, NULL);
- do {
- dom_info->domain = cpu_to_le32(domain);
- /* Set the number of OPPs to be skipped/already read */
- dom_info->level_index = cpu_to_le32(tot_opp_cnt);
+ return ret;
+}
- ret = scmi_do_xfer(handle, t);
- if (ret)
- break;
+static int scmi_perf_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
- num_returned = le16_to_cpu(level_info->num_returned);
- num_remaining = le16_to_cpu(level_info->num_remaining);
- if (tot_opp_cnt + num_returned > MAX_OPPS) {
- dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
- break;
- }
+ return pi->num_domains;
+}
- opp = &perf_dom->opp[tot_opp_cnt];
- for (cnt = 0; cnt < num_returned; cnt++, opp++) {
- opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
- opp->power = le32_to_cpu(level_info->opp[cnt].power);
- opp->trans_latency_us = le16_to_cpu
- (level_info->opp[cnt].transition_latency_us);
+static inline struct perf_dom_info *
+scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
- dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
- opp->perf, opp->power, opp->trans_latency_us);
- }
+ if (domain >= pi->num_domains)
+ return ERR_PTR(-EINVAL);
- tot_opp_cnt += num_returned;
- /*
- * check for both returned and remaining to avoid infinite
- * loop due to buggy firmware
- */
- } while (num_returned && num_remaining);
+ return pi->dom_info + domain;
+}
- perf_dom->opp_count = tot_opp_cnt;
- scmi_xfer_put(handle, t);
+static const struct scmi_perf_domain_info *
+scmi_perf_info_get(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ struct perf_dom_info *dom;
- sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
- return ret;
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return ERR_PTR(-EINVAL);
+
+ return &dom->info;
}
-static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
- u32 max_perf, u32 min_perf)
+static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_limits *limits;
- ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
- sizeof(*limits), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
+ sizeof(*limits), 0, &t);
if (ret)
return ret;
@@ -266,27 +549,84 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
limits->max_level = cpu_to_le32(max_perf);
limits->min_level = cpu_to_le32(min_perf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
- u32 *max_perf, u32 *min_perf)
+static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 max_perf,
+ u32 min_perf)
+{
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
+ dom->id, min_perf, max_perf);
+ iowrite32(max_perf, fci->set_addr);
+ iowrite32(min_perf, fci->set_addr + 4);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf);
+}
+
+static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ if (!dom->set_limits)
+ return -EOPNOTSUPP;
+
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
+ return -EINVAL;
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ if (min_perf) {
+ opp = xa_load(&dom->opps_by_lvl, min_perf);
+ if (!opp)
+ return -EIO;
+
+ min_perf = opp->level_index;
+ }
+
+ if (max_perf) {
+ opp = xa_load(&dom->opps_by_lvl, max_perf);
+ if (!opp)
+ return -EIO;
+
+ max_perf = opp->level_index;
+ }
+ }
+
+ return __scmi_perf_limits_set(ph, dom, max_perf, min_perf);
+}
+
+static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_get_limits *limits;
- ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
- sizeof(__le32), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
+ sizeof(__le32), 0, &t);
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
limits = t->rx.buf;
@@ -294,19 +634,68 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
*min_perf = le32_to_cpu(limits->min_level);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
- u32 level, bool poll)
+static int __scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 *max_perf,
+ u32 *min_perf)
+{
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ *max_perf = ioread32(fci->get_addr);
+ *min_perf = ioread32(fci->get_addr + 4);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
+ dom->id, *min_perf, *max_perf);
+ return 0;
+ }
+
+ return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf);
+}
+
+static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
+{
+ int ret;
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ ret = __scmi_perf_limits_get(ph, dom, max_perf, min_perf);
+ if (ret)
+ return ret;
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_idx, *min_perf);
+ if (!opp)
+ return -EIO;
+
+ *min_perf = opp->perf;
+
+ opp = xa_load(&dom->opps_by_idx, *max_perf);
+ if (!opp)
+ return -EIO;
+
+ *max_perf = opp->perf;
+ }
+
+ return 0;
+}
+
+static int scmi_perf_msg_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_level *lvl;
- ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
- sizeof(*lvl), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
if (ret)
return ret;
@@ -315,133 +704,319 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
lvl->domain = cpu_to_le32(domain);
lvl->level = cpu_to_le32(level);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
- u32 *level, bool poll)
+static int __scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 level,
+ bool poll)
+{
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
+ dom->id, level, 0);
+ iowrite32(level, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_perf_msg_level_set(ph, dom->id, level, poll);
+}
+
+static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
+{
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ if (!dom->info.set_perf)
+ return -EOPNOTSUPP;
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_lvl, level);
+ if (!opp)
+ return -EIO;
+
+ level = opp->level_index;
+ }
+
+ return __scmi_perf_level_set(ph, dom, level, poll);
+}
+
+static int scmi_perf_msg_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
- sizeof(u32), sizeof(u32), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
+ sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
t->hdr.poll_completion = poll;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
- *level = le32_to_cpu(*(__le32 *)t->rx.buf);
+ *level = get_unaligned_le32(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-/* Device specific ops */
-static int scmi_dev_domain_id(struct device *dev)
+static int __scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 *level,
+ bool poll)
{
- struct of_phandle_args clkspec;
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
+ *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
+ dom->id, *level, 0);
+ return 0;
+ }
- if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
- 0, &clkspec))
- return -EINVAL;
+ return scmi_perf_msg_level_get(ph, dom->id, level, poll);
+}
+
+static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
+{
+ int ret;
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
- return clkspec.args[0];
+ ret = __scmi_perf_level_get(ph, dom, level, poll);
+ if (ret)
+ return ret;
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_idx, *level);
+ if (!opp)
+ return -EIO;
+
+ *level = opp->perf;
+ }
+
+ return 0;
}
-static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
- struct device *dev)
+static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id,
+ bool enable)
{
- int idx, ret, domain;
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_notify_level_or_limits *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_GET, 4, dom->id,
+ &fc[PERF_FC_LEVEL].get_addr, NULL,
+ &fc[PERF_FC_LEVEL].rate_limit);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_GET, 8, dom->id,
+ &fc[PERF_FC_LIMIT].get_addr, NULL,
+ &fc[PERF_FC_LIMIT].rate_limit);
+
+ if (dom->info.set_perf)
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_SET, 4, dom->id,
+ &fc[PERF_FC_LEVEL].set_addr,
+ &fc[PERF_FC_LEVEL].set_db,
+ &fc[PERF_FC_LEVEL].rate_limit);
+
+ if (dom->set_limits)
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_SET, 8, dom->id,
+ &fc[PERF_FC_LIMIT].set_addr,
+ &fc[PERF_FC_LIMIT].set_db,
+ &fc[PERF_FC_LIMIT].rate_limit);
+
+ dom->fc_info = fc;
+}
+
+static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
+ struct device *dev, u32 domain)
+{
+ int idx, ret;
unsigned long freq;
- struct scmi_opp *opp;
+ struct dev_pm_opp_data data = {};
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
- domain = scmi_dev_domain_id(dev);
- if (domain < 0)
- return domain;
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
- dom = pi->dom_info + domain;
+ for (idx = 0; idx < dom->opp_count; idx++) {
+ if (!dom->level_indexing_mode)
+ freq = dom->opp[idx].perf * dom->mult_factor;
+ else
+ freq = dom->opp[idx].indicative_freq * dom->mult_factor;
- for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
- freq = opp->perf * dom->mult_factor;
+ /* All OPPs above the sustained frequency are treated as turbo */
+ data.turbo = freq > dom->sustained_freq_khz * 1000UL;
- ret = dev_pm_opp_add(dev, freq, 0);
- if (ret) {
- dev_warn(dev, "failed to add opp %luHz\n", freq);
+ data.level = dom->opp[idx].perf;
+ data.freq = freq;
- while (idx-- > 0) {
- freq = (--opp)->perf * dom->mult_factor;
- dev_pm_opp_remove(dev, freq);
- }
+ ret = dev_pm_opp_add_dynamic(dev, &data);
+ if (ret) {
+ dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n",
+ domain, dom->info.name, idx, freq);
+ dev_pm_opp_remove_all_dynamic(dev);
return ret;
}
+
+ dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n",
+ domain, dom->info.name, idx, freq);
}
return 0;
}
-static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
- struct device *dev)
+static int
+scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
- int domain = scmi_dev_domain_id(dev);
- if (domain < 0)
- return domain;
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
- dom = pi->dom_info + domain;
/* uS to nS */
return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
-static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
+static int
+scmi_dvfs_rate_limit_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit)
+{
+ struct perf_dom_info *dom;
+
+ if (!rate_limit)
+ return -EINVAL;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ *rate_limit = dom->rate_limit_us;
+ return 0;
+}
+
+static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long freq, bool poll)
{
- struct scmi_perf_info *pi = handle->perf_priv;
- struct perf_dom_info *dom = pi->dom_info + domain;
+ unsigned int level;
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
- return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
- poll);
+ if (!dom->level_indexing_mode) {
+ level = freq / dom->mult_factor;
+ } else {
+ struct scmi_opp *opp;
+
+ opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
+ freq / dom->mult_factor);
+ if (!opp)
+ return -EIO;
+
+ level = opp->level_index;
+ }
+
+ return __scmi_perf_level_set(ph, dom, level, poll);
}
-static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
+static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *freq, bool poll)
{
int ret;
u32 level;
- struct scmi_perf_info *pi = handle->perf_priv;
- struct perf_dom_info *dom = pi->dom_info + domain;
+ struct perf_dom_info *dom;
- ret = scmi_perf_level_get(handle, domain, &level, poll);
- if (!ret)
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ ret = __scmi_perf_level_get(ph, dom, &level, poll);
+ if (ret)
+ return ret;
+
+ if (!dom->level_indexing_mode) {
*freq = level * dom->mult_factor;
+ } else {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_idx, level);
+ if (!opp)
+ return -EIO;
+
+ *freq = opp->indicative_freq * dom->mult_factor;
+ }
return ret;
}
-static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
- unsigned long *freq, unsigned long *power)
+static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ u32 domain, unsigned long *freq,
+ unsigned long *power)
{
- struct scmi_perf_info *pi = handle->perf_priv;
struct perf_dom_info *dom;
unsigned long opp_freq;
int idx, ret = -EINVAL;
struct scmi_opp *opp;
- dom = pi->dom_info + domain;
- if (!dom)
- return -EIO;
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
- opp_freq = opp->perf * dom->mult_factor;
+ if (!dom->level_indexing_mode)
+ opp_freq = opp->perf * dom->mult_factor;
+ else
+ opp_freq = opp->indicative_freq * dom->mult_factor;
+
if (opp_freq < *freq)
continue;
@@ -454,37 +1029,279 @@ static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static struct scmi_perf_ops perf_ops = {
+static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
+ u32 domain)
+{
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return false;
+
+ return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
+}
+
+static int scmi_fast_switch_rate_limit(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit)
+{
+ struct perf_dom_info *dom;
+
+ if (!rate_limit)
+ return -EINVAL;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ if (!dom->fc_info)
+ return -EINVAL;
+
+ *rate_limit = dom->fc_info[PERF_FC_LEVEL].rate_limit;
+ return 0;
+}
+
+static enum scmi_power_scale
+scmi_power_scale_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ return pi->power_scale;
+}
+
+static const struct scmi_perf_proto_ops perf_proto_ops = {
+ .num_domains_get = scmi_perf_num_domains_get,
+ .info_get = scmi_perf_info_get,
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
.level_set = scmi_perf_level_set,
.level_get = scmi_perf_level_get,
- .device_domain_id = scmi_dev_domain_id,
.transition_latency_get = scmi_dvfs_transition_latency_get,
+ .rate_limit_get = scmi_dvfs_rate_limit_get,
.device_opps_add = scmi_dvfs_device_opps_add,
.freq_set = scmi_dvfs_freq_set,
.freq_get = scmi_dvfs_freq_get,
.est_power_get = scmi_dvfs_est_power_get,
+ .fast_switch_possible = scmi_fast_switch_possible,
+ .fast_switch_rate_limit = scmi_fast_switch_rate_limit,
+ .power_scale_get = scmi_power_scale_get,
};
-static int scmi_perf_protocol_init(struct scmi_handle *handle)
+static bool scmi_perf_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
{
- int domain;
+ bool supported;
+ struct perf_dom_info *dom;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return false;
+
+ dom = scmi_perf_domain_lookup(ph, src_id);
+ if (IS_ERR(dom))
+ return false;
+
+ if (evt_id == SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED)
+ supported = dom->perf_limit_notify;
+ else
+ supported = dom->perf_level_notify;
+
+ return supported;
+}
+
+static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static int
+scmi_perf_xlate_opp_to_freq(struct perf_dom_info *dom,
+ unsigned int index, unsigned long *freq)
+{
+ struct scmi_opp *opp;
+
+ if (!dom || !freq)
+ return -EINVAL;
+
+ if (!dom->level_indexing_mode) {
+ opp = xa_load(&dom->opps_by_lvl, index);
+ if (!opp)
+ return -ENODEV;
+
+ *freq = opp->perf * dom->mult_factor;
+ } else {
+ opp = xa_load(&dom->opps_by_idx, index);
+ if (!opp)
+ return -ENODEV;
+
+ *freq = opp->indicative_freq * dom->mult_factor;
+ }
+
+ return 0;
+}
+
+static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ int ret;
+ void *rep = NULL;
+ struct perf_dom_info *dom;
+
+ switch (evt_id) {
+ case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
+ {
+ const struct scmi_perf_limits_notify_payld *p = payld;
+ struct scmi_perf_limits_report *r = report;
+ unsigned long freq_min, freq_max;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->range_max = le32_to_cpu(p->range_max);
+ r->range_min = le32_to_cpu(p->range_min);
+ /* Check if the reported domain exists at all */
+ dom = scmi_perf_domain_lookup(ph, r->domain_id);
+ if (IS_ERR(dom))
+ break;
+ /*
+ * Event will be reported from this point on...
+ * ...even if, later, xlated frequencies were not retrieved.
+ */
+ *src_id = r->domain_id;
+ rep = r;
+
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max);
+ if (ret)
+ break;
+
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min);
+ if (ret)
+ break;
+
+ /* Report translated freqs ONLY if both available */
+ r->range_max_freq = freq_max;
+ r->range_min_freq = freq_min;
+
+ break;
+ }
+ case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
+ {
+ const struct scmi_perf_level_notify_payld *p = payld;
+ struct scmi_perf_level_report *r = report;
+ unsigned long freq;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->performance_level = le32_to_cpu(p->performance_level);
+ /* Check if the reported domain exists at all */
+ dom = scmi_perf_domain_lookup(ph, r->domain_id);
+ if (IS_ERR(dom))
+ break;
+ /*
+ * Event will be reported from this point on...
+ * ...even if, later, xlated frequencies were not retrieved.
+ */
+ *src_id = r->domain_id;
+ rep = r;
+
+ /* Report translated freqs ONLY if available */
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level,
+ &freq);
+ if (ret)
+ break;
+
+ r->performance_level_freq = freq;
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
+static const struct scmi_event perf_events[] = {
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_limits_report),
+ },
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_level_report),
+ },
+};
+
+static const struct scmi_event_ops perf_event_ops = {
+ .is_notify_supported = scmi_perf_notify_supported,
+ .get_num_sources = scmi_perf_get_num_sources,
+ .set_notify_enabled = scmi_perf_set_notify_enabled,
+ .fill_custom_report = scmi_perf_fill_custom_report,
+};
+
+static const struct scmi_protocol_events perf_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &perf_event_ops,
+ .evts = perf_events,
+ .num_events = ARRAY_SIZE(perf_events),
+};
+
+static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
u32 version;
struct scmi_perf_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
- dev_dbg(handle->dev, "Performance Version %d.%d\n",
+ dev_dbg(ph->dev, "Performance Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_perf_attributes_get(handle, pinfo);
+ pinfo->version = version;
- pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ ret = scmi_perf_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
@@ -492,19 +1309,29 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct perf_dom_info *dom = pinfo->dom_info + domain;
- scmi_perf_domain_attributes_get(handle, domain, dom);
- scmi_perf_describe_levels_get(handle, domain, dom);
+ dom->id = domain;
+ scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd,
+ pinfo->notify_lvl_cmd, version);
+ scmi_perf_describe_levels_get(ph, dom, version);
+
+ if (dom->perf_fastchannels)
+ scmi_perf_domain_init_fc(ph, dom);
}
- handle->perf_ops = &perf_ops;
- handle->perf_priv = pinfo;
+ ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo);
+ if (ret)
+ return ret;
- return 0;
+ return ph->set_priv(ph, pinfo, version);
}
-static int __init scmi_perf_init(void)
-{
- return scmi_protocol_register(SCMI_PROTOCOL_PERF,
- &scmi_perf_protocol_init);
-}
-subsys_initcall(scmi_perf_init);
+static const struct scmi_protocol scmi_perf = {
+ .id = SCMI_PROTOCOL_PERF,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_perf_protocol_init,
+ .ops = &perf_proto_ops,
+ .events = &perf_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
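For reference, a minimal consumer-side sketch of the ops this protocol registers; it assumes probe context in some SCMI driver with a valid struct scmi_device *sdev, the example_probe() name is hypothetical, and error handling is trimmed to the essentials:

	#include <linux/err.h>
	#include <linux/scmi_protocol.h>

	static int example_probe(struct scmi_device *sdev)
	{
		int ret;
		u32 level;
		struct scmi_protocol_handle *ph;
		const struct scmi_perf_proto_ops *perf_ops;

		/* Acquire the PERF protocol ops and its protocol handle */
		perf_ops = sdev->handle->devm_protocol_get(sdev,
							   SCMI_PROTOCOL_PERF,
							   &ph);
		if (IS_ERR(perf_ops))
			return PTR_ERR(perf_ops);

		/* Domains are indexed 0 .. num_domains_get() - 1 */
		if (perf_ops->num_domains_get(ph) < 1)
			return -ENODEV;

		/* Read back the current performance level of domain 0 */
		ret = perf_ops->level_get(ph, 0, &level, false);
		if (ret)
			return ret;

		dev_info(&sdev->dev, "domain 0 at level %u\n", level);
		return 0;
	}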