 drivers/cpufreq/scmi-cpufreq.c | 101 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 95 insertions(+), 6 deletions(-)
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 4ee23f4ebf4a..b8fe758aeb01 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -16,6 +16,7 @@
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/scmi_protocol.h>
 #include <linux/types.h>
@@ -26,10 +27,13 @@ struct scmi_data {
 	int nr_opp;
 	struct device *cpu_dev;
 	cpumask_var_t opp_shared_cpus;
+	struct notifier_block limit_notify_nb;
+	struct freq_qos_request limits_freq_req;
 };
 
 static struct scmi_protocol_handle *ph;
 static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
 
 static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
 {
@@ -62,9 +66,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
 					     unsigned int target_freq)
 {
 	struct scmi_data *priv = policy->driver_data;
+	unsigned long freq = target_freq;
 
-	if (!perf_ops->freq_set(ph, priv->domain_id,
-				target_freq * 1000, true))
+	if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
 		return target_freq;
 
 	return 0;
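
The new unsigned long local is what this hunk is really about: target_freq is an unsigned int holding kHz, so target_freq * 1000 is evaluated in 32-bit arithmetic and wraps for rates above ~4.29 GHz, whereas widening first makes the multiply 64-bit on arm64. A minimal userspace sketch of the difference (illustrative only, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int target_freq = 5000000; /* 5 GHz expressed in kHz */
        unsigned long freq = target_freq;   /* 64-bit on LP64 targets */

        /* u32 math wraps: 5000000000 mod 2^32 == 705032704 */
        printf("32-bit: %u Hz\n", target_freq * 1000);
        /* widened before the multiply, the result is exact */
        printf("64-bit: %lu Hz\n", freq * 1000);
        return 0;
    }
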
@@ -144,6 +148,51 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
 	return 0;
 }
 
+static int
+scmi_get_rate_limit(u32 domain, bool has_fast_switch)
+{
+	int ret, rate_limit;
+
+	if (has_fast_switch) {
+		/*
+		 * Fast channels are used whenever available,
+		 * so use their rate_limit value if populated.
+		 */
+		ret = perf_ops->fast_switch_rate_limit(ph, domain,
+						       &rate_limit);
+		if (!ret && rate_limit)
+			return rate_limit;
+	}
+
+	ret = perf_ops->rate_limit_get(ph, domain, &rate_limit);
+	if (ret)
+		return 0;
+
+	return rate_limit;
+}
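
The precedence here: a populated fast-channel rate limit wins, the per-domain limit is the fallback, and any error collapses to 0, which downstream means "no firmware limit, let cpufreq pick its own transition delay". A self-contained mock of that decision tree, with made-up microsecond values standing in for the SCMI firmware replies:

    #include <stdio.h>

    /* stand-ins for perf_ops->fast_switch_rate_limit()/rate_limit_get();
     * like the real ops, both return 0 on success */
    static int fast_switch_rate_limit(int *us) { *us = 1200; return 0; }
    static int rate_limit_get(int *us)         { *us = 5000; return 0; }

    static int get_rate_limit(int has_fast_switch)
    {
        int ret, rate_limit;

        if (has_fast_switch) {
            ret = fast_switch_rate_limit(&rate_limit);
            if (!ret && rate_limit)
                return rate_limit;       /* fast-channel value wins */
        }
        ret = rate_limit_get(&rate_limit);
        return ret ? 0 : rate_limit;     /* 0 == no limit known */
    }

    int main(void)
    {
        printf("fast switch: %d us\n", get_rate_limit(1)); /* 1200 */
        printf("slow path:   %d us\n", get_rate_limit(0)); /* 5000 */
        return 0;
    }
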
+
+static struct freq_attr *scmi_cpufreq_hw_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+	NULL,
+};
+
+static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
+	struct scmi_perf_limits_report *limit_notify = data;
+	unsigned int limit_freq_khz;
+	int ret;
+
+	limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;
+
+	ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
+	if (ret < 0)
+		pr_warn("failed to update freq constraint: %d\n", ret);
+
+	return NOTIFY_OK;
+}
+
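The callback converts because firmware reports limits in Hz while freq QoS operates in kHz; HZ_PER_KHZ is 1000UL from linux/units.h. A worked example with an illustrative 1.8 GHz cap (values made up):

    /* a hypothetical limits report capping the domain at 1.8 GHz */
    struct scmi_perf_limits_report report = {
        .range_max_freq = 1800000000,   /* Hz, from the platform */
    };
    unsigned int limit_freq_khz = report.range_max_freq / HZ_PER_KHZ;
    /* limit_freq_khz == 1800000, handed to freq_qos_update_request() */

Note that freq_qos_update_request() returns 1 when the effective constraint changed and 0 when it did not, which is why the callback warns only on negative (error) returns.
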
 static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret, nr_opp, domain;
@@ -151,6 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct scmi_data *priv;
 	struct cpufreq_frequency_table *freq_table;
+	struct scmi_device *sdev = cpufreq_get_driver_data();
 
 	cpu_dev = get_cpu_device(policy->cpu);
 	if (!cpu_dev) {
@@ -250,8 +300,41 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, domain);
+ policy->transition_delay_us =
+ scmi_get_rate_limit(domain, policy->fast_switch_possible);
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret) {
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ goto out_free_table;
+ } else {
+ scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ scmi_cpufreq_driver.boost_enabled = true;
+ }
+ }
+
+ ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret);
+ goto out_free_table;
+ }
+
+ priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
+ ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ if (ret)
+ dev_warn(&sdev->dev,
+ "failed to register for limits change notifier for domain %d\n",
+ priv->domain_id);
+
return 0;
+out_free_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
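The request is added at FREQ_QOS_MAX_DEFAULT_VALUE (no cap), and FREQ_QOS_MAX requests aggregate by taking the minimum, so the firmware limit composes with thermal or userspace caps on the same policy rather than replacing them. A hedged sketch with a hypothetical second request and made-up kHz values:

    /* hypothetical thermal cap alongside priv->limits_freq_req */
    struct freq_qos_request thermal_req;

    freq_qos_add_request(&policy->constraints, &thermal_req,
                         FREQ_QOS_MAX, 2000000);              /* 2.0 GHz cap */
    freq_qos_update_request(&priv->limits_freq_req, 1500000); /* fw: 1.5 GHz */
    /* the policy's effective max resolves to min(2000000, 1500000) kHz */
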
@@ -264,16 +347,20 @@ out_free_priv:
 	return ret;
 }
 
-static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
+static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct scmi_data *priv = policy->driver_data;
+	struct scmi_device *sdev = cpufreq_get_driver_data();
 
+	sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF,
+							SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+							&priv->domain_id,
+							&priv->limit_notify_nb);
+	freq_qos_remove_request(&priv->limits_freq_req);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 	free_cpumask_var(priv->opp_shared_cpus);
 	kfree(priv);
-
-	return 0;
 }
 
 static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
@@ -308,7 +395,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
 		 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
 		 CPUFREQ_IS_COOLING_DEV,
 	.verify	= cpufreq_generic_frequency_table_verify,
-	.attr	= cpufreq_generic_attr,
+	.attr	= scmi_cpufreq_hw_attr,
 	.target_index	= scmi_cpufreq_set_target,
 	.fast_switch	= scmi_cpufreq_fast_switch,
 	.get	= scmi_cpufreq_get_rate,
@@ -328,6 +415,8 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
 	if (!handle)
 		return -ENODEV;
 
+	scmi_cpufreq_driver.driver_data = sdev;
+
 	perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
 	if (IS_ERR(perf_ops))
 		return PTR_ERR(perf_ops);
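
Stashing sdev in driver_data before registration matters because cpufreq_register_driver() (called later in this probe function in mainline) can invoke ->init() synchronously, and both scmi_cpufreq_init() and scmi_cpufreq_exit() immediately read the pointer back. A minimal sketch of that consumer side (example_init is a hypothetical name, not part of the driver):

    static int example_init(struct cpufreq_policy *policy)
    {
        /* returns scmi_cpufreq_driver.driver_data as set in probe() */
        struct scmi_device *sdev = cpufreq_get_driver_data();

        if (!sdev)  /* only possible if probe() skipped the assignment */
            return -ENODEV;
        /* sdev->handle->notify_ops is now usable from policy callbacks */
        return 0;
    }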