Diffstat (limited to 'drivers/devfreq')
-rw-r--r--  drivers/devfreq/Kconfig                    |  12
-rw-r--r--  drivers/devfreq/Makefile                   |   1
-rw-r--r--  drivers/devfreq/devfreq-event.c            |  12
-rw-r--r--  drivers/devfreq/devfreq.c                  | 135
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c        |   9
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c        |  22
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c       | 865
-rw-r--r--  drivers/devfreq/exynos-bus.c               |  41
-rw-r--r--  drivers/devfreq/governor.h                 | 127
-rw-r--r--  drivers/devfreq/governor_passive.c         |  27
-rw-r--r--  drivers/devfreq/governor_performance.c     |   3
-rw-r--r--  drivers/devfreq/governor_powersave.c       |   3
-rw-r--r--  drivers/devfreq/governor_simpleondemand.c  |   7
-rw-r--r--  drivers/devfreq/governor_userspace.c       |  21
-rw-r--r--  drivers/devfreq/hisi_uncore_freq.c         | 658
-rw-r--r--  drivers/devfreq/imx-bus.c                  |   4
-rw-r--r--  drivers/devfreq/imx8m-ddrc.c               |   2
-rw-r--r--  drivers/devfreq/mtk-cci-devfreq.c          |  24
-rw-r--r--  drivers/devfreq/rk3399_dmc.c               |  14
-rw-r--r--  drivers/devfreq/sun8i-a33-mbus.c           |  44
-rw-r--r--  drivers/devfreq/tegra30-devfreq.c          |  17
21 files changed, 1621 insertions, 427 deletions
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 9754d8b31621..c999c4a1e567 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig PM_DEVFREQ
bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
- select SRCU
select PM_OPP
help
A device may have a list of frequencies and voltages available.
@@ -91,6 +90,17 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_HISI_UNCORE_DEVFREQ
+ tristate "HiSilicon uncore DEVFREQ Driver"
+ depends on ACPI && ACPI_PPTT && PCC
+ select DEVFREQ_GOV_PERFORMANCE
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds a DEVFREQ driver that manages uncore frequency scaling for
+ HiSilicon Kunpeng SoCs. This enables runtime management of uncore
+ frequency scaling from kernel and userspace. The uncore domain
+ contains system interconnects and L3 cache.
+
config ARM_IMX_BUS_DEVFREQ
tristate "i.MX Generic Bus DEVFREQ Driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index bf40d04928d0..404179d79a9d 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_HISI_UNCORE_DEVFREQ) += hisi_uncore_freq.o
obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 6765c03334bc..70219099c604 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -233,7 +233,7 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
mutex_lock(&devfreq_event_list_lock);
list_for_each_entry(edev, &devfreq_event_list, node) {
- if (edev->dev.parent && edev->dev.parent->of_node == node)
+ if (edev->dev.parent && device_match_of_node(edev->dev.parent, node))
goto out;
}
@@ -244,13 +244,9 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
edev = NULL;
out:
mutex_unlock(&devfreq_event_list_lock);
-
- if (!edev) {
- of_node_put(node);
- return ERR_PTR(-ENODEV);
- }
-
of_node_put(node);
+ if (!edev)
+ return ERR_PTR(-ENODEV);
return edev;
}
@@ -469,7 +465,7 @@ ATTRIBUTE_GROUPS(devfreq_event);
static int __init devfreq_event_init(void)
{
- devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
+ devfreq_event_class = class_create("devfreq-event");
if (IS_ERR(devfreq_event_class)) {
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_event_class);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 63347a5ae599..00979f2e0e27 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -20,6 +20,7 @@
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
@@ -28,7 +29,6 @@
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/units.h>
-#include "governor.h"
#define CREATE_TRACE_POINTS
#include <trace/events/devfreq.h>
@@ -88,7 +88,7 @@ static unsigned long find_available_min_freq(struct devfreq *devfreq)
struct dev_pm_opp *opp;
unsigned long min_freq = 0;
- opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
+ opp = dev_pm_opp_find_freq_ceil_indexed(devfreq->dev.parent, &min_freq, 0);
if (IS_ERR(opp))
min_freq = 0;
else
@@ -102,7 +102,7 @@ static unsigned long find_available_max_freq(struct devfreq *devfreq)
struct dev_pm_opp *opp;
unsigned long max_freq = ULONG_MAX;
- opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
+ opp = dev_pm_opp_find_freq_floor_indexed(devfreq->dev.parent, &max_freq, 0);
if (IS_ERR(opp))
max_freq = 0;
else
@@ -152,11 +152,8 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
(unsigned long)HZ_PER_KHZ * qos_max_freq);
/* Apply constraints from OPP interface */
- *min_freq = max(*min_freq, devfreq->scaling_min_freq);
- *max_freq = min(*max_freq, devfreq->scaling_max_freq);
-
- if (*min_freq > *max_freq)
- *min_freq = *max_freq;
+ *max_freq = clamp(*max_freq, devfreq->scaling_min_freq, devfreq->scaling_max_freq);
+ *min_freq = clamp(*min_freq, devfreq->scaling_min_freq, *max_freq);
}
EXPORT_SYMBOL(devfreq_get_freq_range);
@@ -196,7 +193,7 @@ static int set_freq_table(struct devfreq *devfreq)
return -ENOMEM;
for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) {
- opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
+ opp = dev_pm_opp_find_freq_ceil_indexed(devfreq->dev.parent, &freq, 0);
if (IS_ERR(opp)) {
devm_kfree(devfreq->dev.parent, devfreq->freq_table);
return PTR_ERR(opp);
@@ -461,10 +458,14 @@ static void devfreq_monitor(struct work_struct *work)
if (err)
dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+ if (devfreq->stop_polling)
+ goto out;
+
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
- mutex_unlock(&devfreq->lock);
+out:
+ mutex_unlock(&devfreq->lock);
trace_devfreq_monitor(devfreq);
}
@@ -472,16 +473,21 @@ static void devfreq_monitor(struct work_struct *work)
* devfreq_monitor_start() - Start load monitoring of devfreq instance
* @devfreq: the devfreq instance.
*
- * Helper function for starting devfreq device load monitoring. By
- * default delayed work based monitoring is supported. Function
- * to be called from governor in response to DEVFREQ_GOV_START
- * event when device is added to devfreq framework.
+ * Helper function for starting devfreq device load monitoring. By default,
+ * deferrable timer is used for load monitoring. But the users can change this
+ * behavior using the "timer" type in devfreq_dev_profile. This function will be
+ * called by devfreq governor in response to the DEVFREQ_GOV_START event
+ * generated while adding a device to the devfreq framework.
*/
void devfreq_monitor_start(struct devfreq *devfreq)
{
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
return;
+ mutex_lock(&devfreq->lock);
+ if (delayed_work_pending(&devfreq->work))
+ goto out;
+
switch (devfreq->profile->timer) {
case DEVFREQ_TIMER_DEFERRABLE:
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
@@ -490,12 +496,16 @@ void devfreq_monitor_start(struct devfreq *devfreq)
INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
break;
default:
- return;
+ goto out;
}
if (devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
+
+out:
+ devfreq->stop_polling = false;
+ mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_start);
@@ -512,6 +522,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq)
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
return;
+ mutex_lock(&devfreq->lock);
+ if (devfreq->stop_polling) {
+ mutex_unlock(&devfreq->lock);
+ return;
+ }
+
+ devfreq->stop_polling = true;
+ mutex_unlock(&devfreq->lock);
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -763,6 +781,7 @@ static void devfreq_dev_release(struct device *dev)
dev_pm_opp_put_opp_table(devfreq->opp_table);
mutex_destroy(&devfreq->lock);
+ srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
kfree(devfreq);
}
@@ -776,8 +795,7 @@ static void remove_sysfs_files(struct devfreq *devfreq,
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
* @governor_name: name of the policy to choose frequency.
- * @data: private data for the governor. The devfreq framework does not
- * touch this value.
+ * @data: devfreq driver pass to governors, governor should not change it.
*/
struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
@@ -786,7 +804,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
- unsigned long min_freq, max_freq;
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -814,6 +831,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
+ devfreq->dev.groups = profile->dev_groups;
devfreq->dev.release = devfreq_dev_release;
INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
@@ -854,8 +872,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_dev;
}
- devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
-
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
if (IS_ERR(devfreq->opp_table))
@@ -1011,8 +1027,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res)
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
* @governor_name: name of the policy to choose frequency.
- * @data: private data for the governor. The devfreq framework does not
- * touch this value.
+ * @data: devfreq driver pass to governors, governor should not change it.
*
* This function manages automatically the memory of devfreq device using device
* resource management and simplify the free operation for memory of devfreq
@@ -1059,7 +1074,7 @@ struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
mutex_lock(&devfreq_list_lock);
list_for_each_entry(devfreq, &devfreq_list, node) {
if (devfreq->dev.parent
- && devfreq->dev.parent->of_node == node) {
+ && device_match_of_node(devfreq->dev.parent, node)) {
mutex_unlock(&devfreq_list_lock);
return devfreq;
}
@@ -1362,15 +1377,11 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
int ret;
struct device *dev = devfreq->dev.parent;
+ if (!devfreq->governor)
+ continue;
+
if (!strncmp(devfreq->governor->name, governor->name,
DEVFREQ_NAME_LEN)) {
- /* we should have a devfreq governor! */
- if (!devfreq->governor) {
- dev_warn(dev, "%s: Governor %s NOT present\n",
- __func__, governor->name);
- continue;
- /* Fall through */
- }
ret = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (ret) {
@@ -1688,7 +1699,7 @@ static ssize_t trans_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq *df = to_devfreq(dev);
- ssize_t len;
+ ssize_t len = 0;
int i, j;
unsigned int max_state;
@@ -1697,7 +1708,7 @@ static ssize_t trans_stat_show(struct device *dev,
max_state = df->max_state;
if (max_state == 0)
- return sprintf(buf, "Not Supported.\n");
+ return sysfs_emit(buf, "Not Supported.\n");
mutex_lock(&df->lock);
if (!df->stop_polling &&
@@ -1707,31 +1718,49 @@ static ssize_t trans_stat_show(struct device *dev,
}
mutex_unlock(&df->lock);
- len = sprintf(buf, " From : To\n");
- len += sprintf(buf + len, " :");
- for (i = 0; i < max_state; i++)
- len += sprintf(buf + len, "%10lu",
- df->freq_table[i]);
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " :");
+ for (i = 0; i < max_state; i++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10lu",
+ df->freq_table[i]);
+ }
- len += sprintf(buf + len, " time(ms)\n");
+ if (len >= PAGE_SIZE - 1)
+ return PAGE_SIZE - 1;
+ len += sysfs_emit_at(buf, len, " time(ms)\n");
for (i = 0; i < max_state; i++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
if (df->freq_table[i] == df->previous_freq)
- len += sprintf(buf + len, "*");
+ len += sysfs_emit_at(buf, len, "*");
else
- len += sprintf(buf + len, " ");
-
- len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
- for (j = 0; j < max_state; j++)
- len += sprintf(buf + len, "%10u",
+ len += sysfs_emit_at(buf, len, " ");
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10lu:", df->freq_table[i]);
+ for (j = 0; j < max_state; j++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10u",
df->stats.trans_table[(i * max_state) + j]);
+ }
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%10llu\n", (u64)
+ jiffies64_to_msecs(df->stats.time_in_state[i]));
+ }
- len += sprintf(buf + len, "%10llu\n", (u64)
- jiffies64_to_msecs(df->stats.time_in_state[i]));
+ if (len < PAGE_SIZE - 1)
+ len += sysfs_emit_at(buf, len, "Total transition : %u\n",
+ df->stats.total_trans);
+ if (len >= PAGE_SIZE - 1) {
+ pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
}
- len += sprintf(buf + len, "Total transition : %u\n",
- df->stats.total_trans);
return len;
}
@@ -1990,7 +2019,7 @@ DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
static int __init devfreq_init(void)
{
- devfreq_class = class_create(THIS_MODULE, "devfreq");
+ devfreq_class = class_create("devfreq");
if (IS_ERR(devfreq_class)) {
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_class);
@@ -2036,18 +2065,18 @@ struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
/* The freq is an upper bound. opp should be lower */
- opp = dev_pm_opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor_indexed(dev, freq, 0);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = dev_pm_opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil_indexed(dev, freq, 0);
} else {
/* The freq is an lower bound. opp should be higher */
- opp = dev_pm_opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil_indexed(dev, freq, 0);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = dev_pm_opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor_indexed(dev, freq, 0);
}
return opp;
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index ccc531ee6938..6a3efd782ad0 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -214,8 +214,7 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev,
nocp->clk = NULL;
/* Maps the memory mapped IO to control nocp register */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -276,18 +275,16 @@ static int exynos_nocp_probe(struct platform_device *pdev)
return 0;
}
-static int exynos_nocp_remove(struct platform_device *pdev)
+static void exynos_nocp_remove(struct platform_device *pdev)
{
struct exynos_nocp *nocp = platform_get_drvdata(pdev);
clk_disable_unprepare(nocp->clk);
-
- return 0;
}
static struct platform_driver exynos_nocp_driver = {
.probe = exynos_nocp_probe,
- .remove = exynos_nocp_remove,
+ .remove = exynos_nocp_remove,
.driver = {
.name = "exynos-nocp",
.of_match_table = exynos_nocp_id_match,
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index a443e7c42daf..88cd4dfe87e1 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -12,9 +12,9 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>
@@ -507,7 +507,6 @@ static int of_get_devfreq_events(struct device_node *np,
struct device *dev = info->dev;
struct device_node *events_np, *node;
int i, j, count;
- const struct of_device_id *of_id;
int ret;
events_np = of_get_child_by_name(np, "events");
@@ -525,13 +524,7 @@ static int of_get_devfreq_events(struct device_node *np,
}
info->num_events = count;
- of_id = of_match_device(exynos_ppmu_id_match, dev);
- if (of_id)
- info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
- else {
- of_node_put(events_np);
- return -EINVAL;
- }
+ info->ppmu_type = (enum exynos_ppmu_type)device_get_match_data(dev);
j = 0;
for_each_child_of_node(events_np, node) {
@@ -621,8 +614,7 @@ static int exynos_ppmu_parse_dt(struct platform_device *pdev,
}
/* Maps the memory mapped IO to control PPMU register */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -700,18 +692,16 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
return 0;
}
-static int exynos_ppmu_remove(struct platform_device *pdev)
+static void exynos_ppmu_remove(struct platform_device *pdev)
{
struct exynos_ppmu *info = platform_get_drvdata(pdev);
clk_disable_unprepare(info->ppmu.clk);
-
- return 0;
}
static struct platform_driver exynos_ppmu_driver = {
.probe = exynos_ppmu_probe,
- .remove = exynos_ppmu_remove,
+ .remove = exynos_ppmu_remove,
.driver = {
.name = "exynos-ppmu",
.of_match_table = exynos_ppmu_id_match,
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 39ac069cabc7..5e6e7e900bda 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -16,30 +16,69 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/seqlock.h>
#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
+#include <linux/bits.h>
+#include <linux/perf_event.h>
+#include <soc/rockchip/rockchip_grf.h>
#include <soc/rockchip/rk3399_grf.h>
+#include <soc/rockchip/rk3568_grf.h>
+#include <soc/rockchip/rk3588_grf.h>
-#define RK3399_DMC_NUM_CH 2
+#define DMC_MAX_CHANNELS 4
/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
-#define CLR_DDRMON_CTRL (0x1f0000 << 0)
-#define LPDDR4_EN (0x10001 << 4)
-#define HARDWARE_EN (0x10001 << 3)
-#define LPDDR3_EN (0x10001 << 2)
-#define SOFTWARE_EN (0x10001 << 1)
-#define SOFTWARE_DIS (0x10000 << 1)
-#define TIME_CNT_EN (0x10001 << 0)
-
+#define DDRMON_CTRL_LPDDR5 BIT(6)
+#define DDRMON_CTRL_DDR4 BIT(5)
+#define DDRMON_CTRL_LPDDR4 BIT(4)
+#define DDRMON_CTRL_HARDWARE_EN BIT(3)
+#define DDRMON_CTRL_LPDDR23 BIT(2)
+#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
+#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
+#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)
+
+#define DDRMON_CH0_WR_NUM 0x20
+#define DDRMON_CH0_RD_NUM 0x24
#define DDRMON_CH0_COUNT_NUM 0x28
#define DDRMON_CH0_DFI_ACCESS_NUM 0x2c
#define DDRMON_CH1_COUNT_NUM 0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM 0x40
-struct dmc_usage {
- u32 access;
- u32 total;
+#define PERF_EVENT_CYCLES 0x0
+#define PERF_EVENT_READ_BYTES 0x1
+#define PERF_EVENT_WRITE_BYTES 0x2
+#define PERF_EVENT_READ_BYTES0 0x3
+#define PERF_EVENT_WRITE_BYTES0 0x4
+#define PERF_EVENT_READ_BYTES1 0x5
+#define PERF_EVENT_WRITE_BYTES1 0x6
+#define PERF_EVENT_READ_BYTES2 0x7
+#define PERF_EVENT_WRITE_BYTES2 0x8
+#define PERF_EVENT_READ_BYTES3 0x9
+#define PERF_EVENT_WRITE_BYTES3 0xa
+#define PERF_EVENT_BYTES 0xb
+#define PERF_ACCESS_TYPE_MAX 0xc
+
+/**
+ * struct dmc_count_channel - structure to hold counter values from the DDR controller
+ * @access: Number of read and write accesses
+ * @clock_cycles: DDR clock cycles
+ * @read_access: number of read accesses
+ * @write_access: number of write accesses
+ */
+struct dmc_count_channel {
+ u64 access;
+ u64 clock_cycles;
+ u64 read_access;
+ u64 write_access;
+};
+
+struct dmc_count {
+ struct dmc_count_channel c[DMC_MAX_CHANNELS];
};
/*
@@ -49,177 +88,788 @@ struct dmc_usage {
*/
struct rockchip_dfi {
struct devfreq_event_dev *edev;
- struct devfreq_event_desc *desc;
- struct dmc_usage ch_usage[RK3399_DMC_NUM_CH];
+ struct devfreq_event_desc desc;
+ struct dmc_count last_event_count;
+
+ struct dmc_count last_perf_count;
+ struct dmc_count total_count;
+ seqlock_t count_seqlock; /* protects last_perf_count and total_count */
+
struct device *dev;
void __iomem *regs;
struct regmap *regmap_pmu;
struct clk *clk;
+ int usecount;
+ struct mutex mutex;
+ u32 ddr_type;
+ unsigned int channel_mask;
+ unsigned int max_channels;
+ enum cpuhp_state cpuhp_state;
+ struct hlist_node node;
+ struct pmu pmu;
+ struct hrtimer timer;
+ unsigned int cpu;
+ int active_events;
+ int burst_len;
+ int buswidth[DMC_MAX_CHANNELS];
+ int ddrmon_stride;
+ bool ddrmon_ctrl_single;
+ u32 lp5_bank_mode;
+ bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
+ unsigned int count_multiplier; /* number of data clocks per count */
};
-static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
+static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl)
{
- struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
- void __iomem *dfi_regs = info->regs;
- u32 val;
- u32 ddr_type;
+ u32 ddrmon_ver;
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ ddrmon_ver = readl_relaxed(dfi->regs);
+ if (ddrmon_ver < 0x40) {
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LP5_BANK_MODE_MASK,
+ dfi->lp5_bank_mode);
+ break;
+ }
- /* get ddr type */
- regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
- ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
- RK3399_PMUGRF_DDRTYPE_MASK;
+ /*
+ * As it is unknown whether the unpleasant special case
+ * behaviour used by the vendor kernel is needed for any
+ * shipping hardware, ask users to report if they have
+ * some of that hardware.
+ */
+ dev_err(&dfi->edev->dev,
+ "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
+ ddrmon_ver);
+ return -EOPNOTSUPP;
+ default:
+ dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
+ dfi->ddr_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
+{
+ void __iomem *dfi_regs = dfi->regs;
+ int i, ret = 0;
+ u32 ctrl;
+
+ mutex_lock(&dfi->mutex);
+
+ dfi->usecount++;
+ if (dfi->usecount > 1)
+ goto out;
+
+ ret = clk_prepare_enable(dfi->clk);
+ if (ret) {
+ dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret);
+ goto out;
+ }
+
+ ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < dfi->max_channels; i++) {
- /* clear DDRMON_CTRL setting */
- writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
+ if (!(dfi->channel_mask & BIT(i)))
+ continue;
- /* set ddr type to dfi */
- if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3)
- writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
- else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
- writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
+ /* clear DDRMON_CTRL setting */
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_TIMER_CNT_EN, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_HARDWARE_EN, 0),
+ dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- /* enable count, use software mode */
- writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL);
+ writel_relaxed(ctrl, dfi_regs + i * dfi->ddrmon_stride +
+ DDRMON_CTRL);
+
+ /* enable count, use software mode */
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 1),
+ dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
+
+ if (dfi->ddrmon_ctrl_single)
+ break;
+ }
+out:
+ mutex_unlock(&dfi->mutex);
+
+ return ret;
}
-static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
+static void rockchip_dfi_disable(struct rockchip_dfi *dfi)
{
- struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
- void __iomem *dfi_regs = info->regs;
+ void __iomem *dfi_regs = dfi->regs;
+ int i;
+
+ mutex_lock(&dfi->mutex);
+
+ dfi->usecount--;
+
+ WARN_ON_ONCE(dfi->usecount < 0);
+
+ if (dfi->usecount > 0)
+ goto out;
+
+ for (i = 0; i < dfi->max_channels; i++) {
+ if (!(dfi->channel_mask & BIT(i)))
+ continue;
+
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0),
+ dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL);
+ if (dfi->ddrmon_ctrl_single)
+ break;
+ }
+
+ clk_disable_unprepare(dfi->clk);
+out:
+ mutex_unlock(&dfi->mutex);
}
-static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev)
+static void rockchip_dfi_read_counters(struct rockchip_dfi *dfi, struct dmc_count *res)
{
- struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
- u32 tmp, max = 0;
- u32 i, busier_ch = 0;
- void __iomem *dfi_regs = info->regs;
+ u32 i;
+ void __iomem *dfi_regs = dfi->regs;
+
+ for (i = 0; i < dfi->max_channels; i++) {
+ if (!(dfi->channel_mask & BIT(i)))
+ continue;
+ res->c[i].read_access = readl_relaxed(dfi_regs +
+ DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride);
+ res->c[i].write_access = readl_relaxed(dfi_regs +
+ DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride);
+ res->c[i].access = readl_relaxed(dfi_regs +
+ DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride);
+ res->c[i].clock_cycles = readl_relaxed(dfi_regs +
+ DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride);
+ }
+}
- rockchip_dfi_stop_hardware_counter(edev);
+static int rockchip_dfi_event_disable(struct devfreq_event_dev *edev)
+{
+ struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);
+
+ rockchip_dfi_disable(dfi);
- /* Find out which channel is busier */
- for (i = 0; i < RK3399_DMC_NUM_CH; i++) {
- info->ch_usage[i].access = readl_relaxed(dfi_regs +
- DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4;
- info->ch_usage[i].total = readl_relaxed(dfi_regs +
- DDRMON_CH0_COUNT_NUM + i * 20);
- tmp = info->ch_usage[i].access;
- if (tmp > max) {
- busier_ch = i;
- max = tmp;
+ return 0;
+}
+
+static int rockchip_dfi_event_enable(struct devfreq_event_dev *edev)
+{
+ struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);
+
+ return rockchip_dfi_enable(dfi);
+}
+
+static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
+{
+ return 0;
+}
+
+static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);
+ struct dmc_count count;
+ struct dmc_count *last = &dfi->last_event_count;
+ u32 access = 0, clock_cycles = 0;
+ int i;
+
+ rockchip_dfi_read_counters(dfi, &count);
+
+ /* We can only report one channel, so find the busiest one */
+ for (i = 0; i < dfi->max_channels; i++) {
+ u32 a, c;
+
+ if (!(dfi->channel_mask & BIT(i)))
+ continue;
+
+ a = count.c[i].access - last->c[i].access;
+ c = count.c[i].clock_cycles - last->c[i].clock_cycles;
+
+ if (a > access) {
+ access = a;
+ clock_cycles = c;
}
}
- rockchip_dfi_start_hardware_counter(edev);
- return busier_ch;
+ edata->load_count = access * 4;
+ edata->total_count = clock_cycles;
+
+ dfi->last_event_count = count;
+
+ return 0;
+}
+
+static const struct devfreq_event_ops rockchip_dfi_ops = {
+ .disable = rockchip_dfi_event_disable,
+ .enable = rockchip_dfi_event_enable,
+ .get_event = rockchip_dfi_get_event,
+ .set_event = rockchip_dfi_set_event,
+};
+
+#ifdef CONFIG_PERF_EVENTS
+
+static void rockchip_ddr_perf_counters_add(struct rockchip_dfi *dfi,
+ const struct dmc_count *now,
+ struct dmc_count *res)
+{
+ const struct dmc_count *last = &dfi->last_perf_count;
+ int i;
+
+ for (i = 0; i < dfi->max_channels; i++) {
+ res->c[i].read_access = dfi->total_count.c[i].read_access +
+ (u32)(now->c[i].read_access - last->c[i].read_access);
+ res->c[i].write_access = dfi->total_count.c[i].write_access +
+ (u32)(now->c[i].write_access - last->c[i].write_access);
+ res->c[i].access = dfi->total_count.c[i].access +
+ (u32)(now->c[i].access - last->c[i].access);
+ res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles +
+ (u32)(now->c[i].clock_cycles - last->c[i].clock_cycles);
+ }
+}
+
+static ssize_t ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));
+}
+
+static struct device_attribute ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
+
+static struct attribute *ddr_perf_cpumask_attrs[] = {
+ &ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ddr_perf_cpumask_attr_group = {
+ .attrs = ddr_perf_cpumask_attrs,
+};
+
+PMU_EVENT_ATTR_STRING(cycles, ddr_pmu_cycles, "event="__stringify(PERF_EVENT_CYCLES))
+
+#define DFI_PMU_EVENT_ATTR(_name, _var, _str) \
+ PMU_EVENT_ATTR_STRING(_name, _var, _str); \
+ PMU_EVENT_ATTR_STRING(_name.unit, _var##_unit, "MB"); \
+ PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07")
+
+DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0));
+DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0));
+
+DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1));
+DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1));
+
+DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2));
+DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2));
+
+DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3));
+DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3));
+
+DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES));
+DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES));
+
+DFI_PMU_EVENT_ATTR(bytes, ddr_pmu_bytes, "event="__stringify(PERF_EVENT_BYTES));
+
+#define DFI_ATTR_MB(_name) \
+ &_name.attr.attr, \
+ &_name##_unit.attr.attr, \
+ &_name##_scale.attr.attr
+
+static struct attribute *ddr_perf_events_attrs[] = {
+ &ddr_pmu_cycles.attr.attr,
+ DFI_ATTR_MB(ddr_pmu_read_bytes),
+ DFI_ATTR_MB(ddr_pmu_write_bytes),
+ DFI_ATTR_MB(ddr_pmu_read_bytes0),
+ DFI_ATTR_MB(ddr_pmu_write_bytes0),
+ DFI_ATTR_MB(ddr_pmu_read_bytes1),
+ DFI_ATTR_MB(ddr_pmu_write_bytes1),
+ DFI_ATTR_MB(ddr_pmu_read_bytes2),
+ DFI_ATTR_MB(ddr_pmu_write_bytes2),
+ DFI_ATTR_MB(ddr_pmu_read_bytes3),
+ DFI_ATTR_MB(ddr_pmu_write_bytes3),
+ DFI_ATTR_MB(ddr_pmu_bytes),
+ NULL,
+};
+
+static const struct attribute_group ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static const struct attribute_group ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = ddr_perf_format_attrs,
+};
+
+static const struct attribute_group *attr_groups[] = {
+ &ddr_perf_events_attr_group,
+ &ddr_perf_cpumask_attr_group,
+ &ddr_perf_format_attr_group,
+ NULL,
+};
+
+static int rockchip_ddr_perf_event_init(struct perf_event *event)
+{
+ struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->cpu < 0) {
+ dev_warn(dfi->dev, "Can't provide per-task data!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
+{
+ struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
+ int blen = dfi->burst_len;
+ struct dmc_count total, now;
+ unsigned int seq;
+ u64 count = 0;
+ int i;
+
+ rockchip_dfi_read_counters(dfi, &now);
+
+ do {
+ seq = read_seqbegin(&dfi->count_seqlock);
+ rockchip_ddr_perf_counters_add(dfi, &now, &total);
+ } while (read_seqretry(&dfi->count_seqlock, seq));
+
+ switch (event->attr.config) {
+ case PERF_EVENT_CYCLES:
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
+ break;
+ case PERF_EVENT_READ_BYTES:
+ for (i = 0; i < dfi->max_channels; i++)
+ count += total.c[i].read_access * blen * dfi->buswidth[i];
+ break;
+ case PERF_EVENT_WRITE_BYTES:
+ for (i = 0; i < dfi->max_channels; i++)
+ count += total.c[i].write_access * blen * dfi->buswidth[i];
+ break;
+ case PERF_EVENT_READ_BYTES0:
+ count = total.c[0].read_access * blen * dfi->buswidth[0];
+ break;
+ case PERF_EVENT_WRITE_BYTES0:
+ count = total.c[0].write_access * blen * dfi->buswidth[0];
+ break;
+ case PERF_EVENT_READ_BYTES1:
+ count = total.c[1].read_access * blen * dfi->buswidth[1];
+ break;
+ case PERF_EVENT_WRITE_BYTES1:
+ count = total.c[1].write_access * blen * dfi->buswidth[1];
+ break;
+ case PERF_EVENT_READ_BYTES2:
+ count = total.c[2].read_access * blen * dfi->buswidth[2];
+ break;
+ case PERF_EVENT_WRITE_BYTES2:
+ count = total.c[2].write_access * blen * dfi->buswidth[2];
+ break;
+ case PERF_EVENT_READ_BYTES3:
+ count = total.c[3].read_access * blen * dfi->buswidth[3];
+ break;
+ case PERF_EVENT_WRITE_BYTES3:
+ count = total.c[3].write_access * blen * dfi->buswidth[3];
+ break;
+ case PERF_EVENT_BYTES:
+ for (i = 0; i < dfi->max_channels; i++)
+ count += total.c[i].access * blen * dfi->buswidth[i];
+ break;
+ }
+
+ return count;
+}
+
+static void rockchip_ddr_perf_event_update(struct perf_event *event)
+{
+ u64 now;
+ s64 prev;
+
+ if (event->attr.config >= PERF_ACCESS_TYPE_MAX)
+ return;
+
+ now = rockchip_ddr_perf_event_get_count(event);
+ prev = local64_xchg(&event->hw.prev_count, now);
+ local64_add(now - prev, &event->count);
+}
+
+static void rockchip_ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ u64 now = rockchip_ddr_perf_event_get_count(event);
+
+ local64_set(&event->hw.prev_count, now);
+}
+
+static int rockchip_ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
+
+ dfi->active_events++;
+
+ if (dfi->active_events == 1) {
+ dfi->total_count = (struct dmc_count){};
+ rockchip_dfi_read_counters(dfi, &dfi->last_perf_count);
+ hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL);
+ }
+
+ if (flags & PERF_EF_START)
+ rockchip_ddr_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void rockchip_ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ rockchip_ddr_perf_event_update(event);
+}
+
+static void rockchip_ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
+
+ rockchip_ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ dfi->active_events--;
+
+ if (dfi->active_events == 0)
+ hrtimer_cancel(&dfi->timer);
}
-static int rockchip_dfi_disable(struct devfreq_event_dev *edev)
+static enum hrtimer_restart rockchip_dfi_timer(struct hrtimer *timer)
{
- struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+ struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer);
+ struct dmc_count now, total;
+
+ rockchip_dfi_read_counters(dfi, &now);
+
+ write_seqlock(&dfi->count_seqlock);
- rockchip_dfi_stop_hardware_counter(edev);
- clk_disable_unprepare(info->clk);
+ rockchip_ddr_perf_counters_add(dfi, &now, &total);
+ dfi->total_count = total;
+ dfi->last_perf_count = now;
+
+ write_sequnlock(&dfi->count_seqlock);
+
+ hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));
+
+ return HRTIMER_RESTART;
+};
+
+static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node);
+ int target;
+
+ if (cpu != dfi->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&dfi->pmu, cpu, target);
+ dfi->cpu = target;
return 0;
}
-static int rockchip_dfi_enable(struct devfreq_event_dev *edev)
+static void rockchip_ddr_cpuhp_remove_state(void *data)
+{
+ struct rockchip_dfi *dfi = data;
+
+ cpuhp_remove_multi_state(dfi->cpuhp_state);
+
+ rockchip_dfi_disable(dfi);
+}
+
+static void rockchip_ddr_cpuhp_remove_instance(void *data)
{
- struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+ struct rockchip_dfi *dfi = data;
+
+ cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);
+}
+
+static void rockchip_ddr_perf_remove(void *data)
+{
+ struct rockchip_dfi *dfi = data;
+
+ perf_pmu_unregister(&dfi->pmu);
+}
+
+static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
+{
+ struct pmu *pmu = &dfi->pmu;
int ret;
- ret = clk_prepare_enable(info->clk);
+ seqlock_init(&dfi->count_seqlock);
+
+ pmu->module = THIS_MODULE;
+ pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+ pmu->task_ctx_nr = perf_invalid_context;
+ pmu->attr_groups = attr_groups;
+ pmu->event_init = rockchip_ddr_perf_event_init;
+ pmu->add = rockchip_ddr_perf_event_add;
+ pmu->del = rockchip_ddr_perf_event_del;
+ pmu->start = rockchip_ddr_perf_event_start;
+ pmu->stop = rockchip_ddr_perf_event_stop;
+ pmu->read = rockchip_ddr_perf_event_update;
+
+ dfi->cpu = raw_smp_processor_id();
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "rockchip_ddr_perf_pmu",
+ NULL,
+ ddr_perf_offline_cpu);
+
+ if (ret < 0) {
+ dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret);
+ return ret;
+ }
+
+ dfi->cpuhp_state = ret;
+
+ rockchip_dfi_enable(dfi);
+
+ ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
if (ret) {
- dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret);
+ dev_err(dfi->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi);
+ if (ret)
return ret;
+
+ hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ dfi->burst_len = 8;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ dfi->burst_len = 16;
+ break;
}
- rockchip_dfi_start_hardware_counter(edev);
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
+ ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
+}
+#else
+static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
+{
return 0;
}
+#endif
-static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
+static int rk3399_dfi_init(struct rockchip_dfi *dfi)
{
+ struct regmap *regmap_pmu = dfi->regmap_pmu;
+ u32 val;
+
+ dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
+ if (IS_ERR(dfi->clk))
+ return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),
+ "Cannot get the clk pclk_ddr_mon\n");
+
+ /* get ddr type */
+ regmap_read(regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+ dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);
+
+ dfi->channel_mask = GENMASK(1, 0);
+ dfi->max_channels = 2;
+
+ dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
+ dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;
+
+ dfi->ddrmon_stride = 0x14;
+ dfi->ddrmon_ctrl_single = true;
+
return 0;
-}
+};
-static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
- struct devfreq_event_data *edata)
+static int rk3568_dfi_init(struct rockchip_dfi *dfi)
{
- struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
- int busier_ch;
+ struct regmap *regmap_pmu = dfi->regmap_pmu;
+ u32 reg2, reg3;
+
+ regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG2, &reg2);
+ regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG3, &reg3);
+
+ /* lower 3 bits of the DDR type */
+ dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);
- busier_ch = rockchip_dfi_get_busier_ch(edev);
+ /*
+ * For version three and higher the upper two bits of the DDR type are
+ * in RK3568_PMUGRF_OS_REG3
+ */
+ if (FIELD_GET(RK3568_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
+ dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;
- edata->load_count = info->ch_usage[busier_ch].access;
- edata->total_count = info->ch_usage[busier_ch].total;
+ dfi->channel_mask = BIT(0);
+ dfi->max_channels = 1;
+
+ dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
+
+ dfi->ddrmon_stride = 0x0; /* not relevant, we only have a single channel on this SoC */
+ dfi->ddrmon_ctrl_single = true;
return 0;
-}
+};
-static const struct devfreq_event_ops rockchip_dfi_ops = {
- .disable = rockchip_dfi_disable,
- .enable = rockchip_dfi_enable,
- .get_event = rockchip_dfi_get_event,
- .set_event = rockchip_dfi_set_event,
+static int rk3588_dfi_init(struct rockchip_dfi *dfi)
+{
+ struct regmap *regmap_pmu = dfi->regmap_pmu;
+ u32 reg2, reg3, reg4, reg6;
+
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG4, &reg4);
+
+ /* lower 3 bits of the DDR type */
+ dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);
+
+ /*
+ * For version three and higher the upper two bits of the DDR type are
+ * in RK3588_PMUGRF_OS_REG3
+ */
+ if (FIELD_GET(RK3588_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
+ dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;
+
+ dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
+ dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
+ dfi->buswidth[2] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
+ dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
+ dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |
+ FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg4) << 2;
+ dfi->max_channels = 4;
+
+ dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
+
+ if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
+ dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
+ dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
+ if (dfi->lp5_ckr)
+ dfi->count_multiplier *= 2;
+ }
+
+ return 0;
};
static const struct of_device_id rockchip_dfi_id_match[] = {
- { .compatible = "rockchip,rk3399-dfi" },
+ { .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
+ { .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
+ { .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
{ },
};
+
MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
static int rockchip_dfi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct rockchip_dfi *data;
+ struct rockchip_dfi *dfi;
struct devfreq_event_desc *desc;
struct device_node *np = pdev->dev.of_node, *node;
+ int (*soc_init)(struct rockchip_dfi *dfi);
+ int ret;
- data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ soc_init = of_device_get_match_data(&pdev->dev);
+ if (!soc_init)
+ return -EINVAL;
- data->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(data->regs))
- return PTR_ERR(data->regs);
+ dfi = devm_kzalloc(dev, sizeof(*dfi), GFP_KERNEL);
+ if (!dfi)
+ return -ENOMEM;
- data->clk = devm_clk_get(dev, "pclk_ddr_mon");
- if (IS_ERR(data->clk))
- return dev_err_probe(dev, PTR_ERR(data->clk),
- "Cannot get the clk pclk_ddr_mon\n");
+ dfi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dfi->regs))
+ return PTR_ERR(dfi->regs);
- /* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0);
- if (node) {
- data->regmap_pmu = syscon_node_to_regmap(node);
- of_node_put(node);
- if (IS_ERR(data->regmap_pmu))
- return PTR_ERR(data->regmap_pmu);
- }
- data->dev = dev;
+ if (!node)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
+ dfi->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
+ if (IS_ERR(dfi->regmap_pmu))
+ return PTR_ERR(dfi->regmap_pmu);
+
+ dfi->dev = dev;
+ mutex_init(&dfi->mutex);
+ desc = &dfi->desc;
desc->ops = &rockchip_dfi_ops;
- desc->driver_data = data;
+ desc->driver_data = dfi;
desc->name = np->name;
- data->desc = desc;
- data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
- if (IS_ERR(data->edev)) {
+ ret = soc_init(dfi);
+ if (ret)
+ return ret;
+
+ dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
+ if (IS_ERR(dfi->edev)) {
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
- return PTR_ERR(data->edev);
+ return PTR_ERR(dfi->edev);
}
- platform_set_drvdata(pdev, data);
+ ret = rockchip_ddr_perf_init(dfi);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dfi);
return 0;
}
@@ -229,6 +879,7 @@ static struct platform_driver rockchip_dfi_driver = {
.driver = {
.name = "rockchip-dfi",
.of_match_table = rockchip_dfi_id_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(rockchip_dfi_driver);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 027e8f336acc..b9ea7ad2e51b 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -160,7 +160,6 @@ static void exynos_bus_exit(struct device *dev)
platform_device_unregister(bus->icc_pdev);
dev_pm_opp_of_remove_table(dev);
- clk_disable_unprepare(bus->clk);
dev_pm_opp_put_regulators(bus->opp_token);
}
@@ -171,7 +170,6 @@ static void exynos_bus_passive_exit(struct device *dev)
platform_device_unregister(bus->icc_pdev);
dev_pm_opp_of_remove_table(dev);
- clk_disable_unprepare(bus->clk);
}
static int exynos_bus_parent_parse_of(struct device_node *np,
@@ -238,8 +236,7 @@ err_regulator:
return ret;
}
-static int exynos_bus_parse_of(struct device_node *np,
- struct exynos_bus *bus)
+static int exynos_bus_parse_of(struct exynos_bus *bus)
{
struct device *dev = bus->dev;
struct dev_pm_opp *opp;
@@ -247,23 +244,16 @@ static int exynos_bus_parse_of(struct device_node *np,
int ret;
/* Get the clock to provide each bus with source clock */
- bus->clk = devm_clk_get(dev, "bus");
- if (IS_ERR(bus->clk)) {
- dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(bus->clk);
- }
-
- ret = clk_prepare_enable(bus->clk);
- if (ret < 0) {
- dev_err(dev, "failed to get enable clock\n");
- return ret;
- }
+ bus->clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus->clk))
+ return dev_err_probe(dev, PTR_ERR(bus->clk),
+ "failed to get bus clock\n");
/* Get the freq and voltage from OPP table to scale the bus freq */
ret = dev_pm_opp_of_add_table(dev);
if (ret < 0) {
dev_err(dev, "failed to get OPP table\n");
- goto err_clk;
+ return ret;
}
rate = clk_get_rate(bus->clk);
@@ -281,8 +271,6 @@ static int exynos_bus_parse_of(struct device_node *np,
err_opp:
dev_pm_opp_of_remove_table(dev);
-err_clk:
- clk_disable_unprepare(bus->clk);
return ret;
}
@@ -419,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
}
/* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
+ ret = exynos_bus_parse_of(bus);
if (ret < 0)
goto err_reg;
@@ -432,7 +420,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
goto err;
/* Create child platform device for the interconnect provider */
- if (of_get_property(dev->of_node, "#interconnect-cells", NULL)) {
+ if (of_property_present(dev->of_node, "#interconnect-cells")) {
bus->icc_pdev = platform_device_register_data(
dev, "exynos-generic-icc",
PLATFORM_DEVID_AUTO, NULL, 0);
@@ -453,7 +441,6 @@ static int exynos_bus_probe(struct platform_device *pdev)
err:
dev_pm_opp_of_remove_table(dev);
- clk_disable_unprepare(bus->clk);
err_reg:
dev_pm_opp_put_regulators(bus->opp_token);
@@ -467,7 +454,6 @@ static void exynos_bus_shutdown(struct platform_device *pdev)
devfreq_suspend_device(bus->devfreq);
}
-#ifdef CONFIG_PM_SLEEP
static int exynos_bus_resume(struct device *dev)
{
struct exynos_bus *bus = dev_get_drvdata(dev);
@@ -495,11 +481,9 @@ static int exynos_bus_suspend(struct device *dev)
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_bus_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_bus_pm,
+ exynos_bus_suspend, exynos_bus_resume);
static const struct of_device_id exynos_bus_of_match[] = {
{ .compatible = "samsung,exynos-bus", },
@@ -512,12 +496,13 @@ static struct platform_driver exynos_bus_platdrv = {
.shutdown = exynos_bus_shutdown,
.driver = {
.name = "exynos-bus",
- .pm = &exynos_bus_pm,
- .of_match_table = of_match_ptr(exynos_bus_of_match),
+ .pm = pm_sleep_ptr(&exynos_bus_pm),
+ .of_match_table = exynos_bus_of_match,
},
};
module_platform_driver(exynos_bus_platdrv);
+MODULE_SOFTDEP("pre: exynos_ppmu");
MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
deleted file mode 100644
index 0adfebc0467a..000000000000
--- a/drivers/devfreq/governor.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * governor.h - internal header for devfreq governors.
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This header is for devfreq governors in drivers/devfreq/
- */
-
-#ifndef _GOVERNOR_H
-#define _GOVERNOR_H
-
-#include <linux/devfreq.h>
-
-#define DEVFREQ_NAME_LEN 16
-
-#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
-
-/* Devfreq events */
-#define DEVFREQ_GOV_START 0x1
-#define DEVFREQ_GOV_STOP 0x2
-#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
-#define DEVFREQ_GOV_SUSPEND 0x4
-#define DEVFREQ_GOV_RESUME 0x5
-
-#define DEVFREQ_MIN_FREQ 0
-#define DEVFREQ_MAX_FREQ ULONG_MAX
-
-/*
- * Definition of the governor feature flags
- * - DEVFREQ_GOV_FLAG_IMMUTABLE
- * : This governor is never changeable to other governors.
- * - DEVFREQ_GOV_FLAG_IRQ_DRIVEN
- * : The devfreq won't schedule the work for this governor.
- */
-#define DEVFREQ_GOV_FLAG_IMMUTABLE BIT(0)
-#define DEVFREQ_GOV_FLAG_IRQ_DRIVEN BIT(1)
-
-/*
- * Definition of governor attribute flags except for common sysfs attributes
- * - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
- * : Indicate polling_interval sysfs attribute
- * - DEVFREQ_GOV_ATTR_TIMER
- * : Indicate timer sysfs attribute
- */
-#define DEVFREQ_GOV_ATTR_POLLING_INTERVAL BIT(0)
-#define DEVFREQ_GOV_ATTR_TIMER BIT(1)
-
-/**
- * struct devfreq_cpu_data - Hold the per-cpu data
- * @node: list node
- * @dev: reference to cpu device.
- * @first_cpu: the cpumask of the first cpu of a policy.
- * @opp_table: reference to cpu opp table.
- * @cur_freq: the current frequency of the cpu.
- * @min_freq: the min frequency of the cpu.
- * @max_freq: the max frequency of the cpu.
- *
- * This structure stores the required cpu_data of a cpu.
- * This is auto-populated by the governor.
- */
-struct devfreq_cpu_data {
- struct list_head node;
-
- struct device *dev;
- unsigned int first_cpu;
-
- struct opp_table *opp_table;
- unsigned int cur_freq;
- unsigned int min_freq;
- unsigned int max_freq;
-};
-
-/**
- * struct devfreq_governor - Devfreq policy governor
- * @node: list node - contains registered devfreq governors
- * @name: Governor's name
- * @attrs: Governor's sysfs attribute flags
- * @flags: Governor's feature flags
- * @get_target_freq: Returns desired operating frequency for the device.
- * Basically, get_target_freq will run
- * devfreq_dev_profile.get_dev_status() to get the
- * status of the device (load = busy_time / total_time).
- * @event_handler: Callback for devfreq core framework to notify events
- * to governors. Events include per device governor
- * init and exit, opp changes out of devfreq, suspend
- * and resume of per device devfreq during device idle.
- *
- * Note that the callbacks are called with devfreq->lock locked by devfreq.
- */
-struct devfreq_governor {
- struct list_head node;
-
- const char name[DEVFREQ_NAME_LEN];
- const u64 attrs;
- const u64 flags;
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
- int (*event_handler)(struct devfreq *devfreq,
- unsigned int event, void *data);
-};
-
-void devfreq_monitor_start(struct devfreq *devfreq);
-void devfreq_monitor_stop(struct devfreq *devfreq);
-void devfreq_monitor_suspend(struct devfreq *devfreq);
-void devfreq_monitor_resume(struct devfreq *devfreq);
-void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
-
-int devfreq_add_governor(struct devfreq_governor *governor);
-int devfreq_remove_governor(struct devfreq_governor *governor);
-
-int devm_devfreq_add_governor(struct device *dev,
- struct devfreq_governor *governor);
-
-int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
-int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
-void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq,
- unsigned long *max_freq);
-
-static inline int devfreq_update_stats(struct devfreq *df)
-{
- if (!df->profile->get_dev_status)
- return -EINVAL;
-
- return df->profile->get_dev_status(df->dev.parent, &df->last_status);
-}
-#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 953cf9a1e9f7..8cd6f9a59f64 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -14,8 +14,33 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/units.h>
-#include "governor.h"
+
+/**
+ * struct devfreq_cpu_data - Hold the per-cpu data
+ * @node: list node
+ * @dev: reference to cpu device.
+ * @first_cpu: the cpumask of the first cpu of a policy.
+ * @opp_table: reference to cpu opp table.
+ * @cur_freq: the current frequency of the cpu.
+ * @min_freq: the min frequency of the cpu.
+ * @max_freq: the max frequency of the cpu.
+ *
+ * This structure stores the required cpu_data of a cpu.
+ * This is auto-populated by the governor.
+ */
+struct devfreq_cpu_data {
+ struct list_head node;
+
+ struct device *dev;
+ unsigned int first_cpu;
+
+ struct opp_table *opp_table;
+ unsigned int cur_freq;
+ unsigned int min_freq;
+ unsigned int max_freq;
+};
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index 5dbc1e56ec08..fdb22bf512cf 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -7,8 +7,8 @@
*/
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/module.h>
-#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
unsigned long *freq)
@@ -58,4 +58,5 @@ static void __exit devfreq_performance_exit(void)
return;
}
module_exit(devfreq_performance_exit);
+MODULE_DESCRIPTION("DEVFREQ Performance governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 4746af2435b0..ee2d6ec8a512 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -7,8 +7,8 @@
*/
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/module.h>
-#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
unsigned long *freq)
@@ -58,4 +58,5 @@ static void __exit devfreq_powersave_exit(void)
return;
}
module_exit(devfreq_powersave_exit);
+MODULE_DESCRIPTION("DEVFREQ Powersave governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index d57b82a2b570..ac9c5e9e51a4 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -9,12 +9,12 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/math64.h>
-#include "governor.h"
/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
#define DFSO_UPTHRESHOLD (90)
-#define DFSO_DOWNDIFFERENCTIAL (5)
+#define DFSO_DOWNDIFFERENTIAL (5)
static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned long *freq)
{
@@ -22,7 +22,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
struct devfreq_dev_status *stat;
unsigned long long a, b;
unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
- unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
+ unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
err = devfreq_update_stats(df);
@@ -140,4 +140,5 @@ static void __exit devfreq_simple_ondemand_exit(void)
return;
}
module_exit(devfreq_simple_ondemand_exit);
+MODULE_DESCRIPTION("DEVFREQ Simple On-demand governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index ab9db7adb3ad..395174f93960 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -9,10 +9,11 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
+#include <linux/kstrtox.h>
#include <linux/pm.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include "governor.h"
struct userspace_data {
unsigned long user_frequency;
@@ -21,7 +22,7 @@ struct userspace_data {
static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
{
- struct userspace_data *data = df->data;
+ struct userspace_data *data = df->governor_data;
if (data->valid)
*freq = data->user_frequency;
@@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
unsigned long wanted;
int err = 0;
+ err = kstrtoul(buf, 0, &wanted);
+ if (err)
+ return err;
+
mutex_lock(&devfreq->lock);
- data = devfreq->data;
+ data = devfreq->governor_data;
- sscanf(buf, "%lu", &wanted);
data->user_frequency = wanted;
data->valid = true;
err = update_devfreq(devfreq);
@@ -60,7 +64,7 @@ static ssize_t set_freq_show(struct device *dev,
int err = 0;
mutex_lock(&devfreq->lock);
- data = devfreq->data;
+ data = devfreq->governor_data;
if (data->valid)
err = sprintf(buf, "%lu\n", data->user_frequency);
@@ -91,7 +95,7 @@ static int userspace_init(struct devfreq *devfreq)
goto out;
}
data->valid = false;
- devfreq->data = data;
+ devfreq->governor_data = data;
err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
out:
@@ -107,8 +111,8 @@ static void userspace_exit(struct devfreq *devfreq)
if (devfreq->dev.kobj.sd)
sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
- kfree(devfreq->data);
- devfreq->data = NULL;
+ kfree(devfreq->governor_data);
+ devfreq->governor_data = NULL;
}
static int devfreq_userspace_handler(struct devfreq *devfreq,
@@ -153,4 +157,5 @@ static void __exit devfreq_userspace_exit(void)
return;
}
module_exit(devfreq_userspace_exit);
+MODULE_DESCRIPTION("DEVFREQ Userspace governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/hisi_uncore_freq.c b/drivers/devfreq/hisi_uncore_freq.c
new file mode 100644
index 000000000000..4d00d813c8ac
--- /dev/null
+++ b/drivers/devfreq/hisi_uncore_freq.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon uncore frequency scaling driver
+ *
+ * Copyright (c) 2025 HiSilicon Co., Ltd
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+#include <linux/topology.h>
+#include <linux/units.h>
+#include <acpi/pcc.h>
+
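+/*
+ * Driver-specific payload that follows the generic PCC shared-memory
+ * header; 'data' carries both the command argument and the returned value.
+ */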
+struct hisi_uncore_pcc_data {
+ u16 status;
+ u16 resv;
+ u32 data;
+};
+
+struct hisi_uncore_pcc_shmem {
+ struct acpi_pcct_shared_memory head;
+ struct hisi_uncore_pcc_data pcc_data;
+};
+
+enum hisi_uncore_pcc_cmd_type {
+ HUCF_PCC_CMD_GET_CAP = 0,
+ HUCF_PCC_CMD_GET_FREQ,
+ HUCF_PCC_CMD_SET_FREQ,
+ HUCF_PCC_CMD_GET_MODE,
+ HUCF_PCC_CMD_SET_MODE,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ HUCF_PCC_CMD_MAX = 256
+};
+
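+/*
+ * The 'hisi_platform' governor is shared by all uncore devices, so its
+ * registration with devfreq is reference-counted.
+ */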
+static int hisi_platform_gov_usage;
+static DEFINE_MUTEX(hisi_platform_gov_usage_lock);
+
+enum hisi_uncore_freq_mode {
+ HUCF_MODE_PLATFORM = 0,
+ HUCF_MODE_OS,
+ HUCF_MODE_MAX
+};
+
+#define HUCF_CAP_PLATFORM_CTRL BIT(0)
+
+/**
+ * struct hisi_uncore_freq - hisi uncore frequency scaling device data
+ * @dev: device of this frequency scaling driver
+ * @cl: mailbox client object
+ * @pchan: PCC mailbox channel
+ * @chan_id: PCC channel ID
+ * @last_cmd_cmpl_time: timestamp of the last completed PCC command
+ * @pcc_lock: PCC channel lock
+ * @devfreq: devfreq data of this hisi_uncore_freq device
+ * @related_cpus:	CPUs whose performance is primarily affected by this
+ * uncore frequency domain
+ * @cap: capability flag
+ */
+struct hisi_uncore_freq {
+ struct device *dev;
+ struct mbox_client cl;
+ struct pcc_mbox_chan *pchan;
+ int chan_id;
+ ktime_t last_cmd_cmpl_time;
+ struct mutex pcc_lock;
+ struct devfreq *devfreq;
+ struct cpumask related_cpus;
+ u32 cap;
+};
+
+/* PCC channel timeout = PCC nominal latency * NUM */
+#define HUCF_PCC_POLL_TIMEOUT_NUM 1000
+#define HUCF_PCC_POLL_INTERVAL_US 5
+
+/* Default polling interval in ms for devfreq governors */
+#define HUCF_DEFAULT_POLLING_MS 100
+
+static void hisi_uncore_free_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ guard(mutex)(&uncore->pcc_lock);
+ pcc_mbox_free_channel(uncore->pchan);
+ uncore->pchan = NULL;
+}
+
+static void devm_hisi_uncore_free_pcc_chan(void *data)
+{
+ hisi_uncore_free_pcc_chan(data);
+}
+
+static int hisi_uncore_request_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ struct pcc_mbox_chan *pcc_chan;
+
+ uncore->cl = (struct mbox_client) {
+ .dev = dev,
+ .tx_block = false,
+ .knows_txdone = true,
+ };
+
+ pcc_chan = pcc_mbox_request_channel(&uncore->cl, uncore->chan_id);
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(dev, PTR_ERR(pcc_chan),
+ "Failed to request PCC channel %u\n", uncore->chan_id);
+
+ if (!pcc_chan->shmem_base_addr) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory address\n");
+ }
+
+ if (pcc_chan->shmem_size < sizeof(struct hisi_uncore_pcc_shmem)) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory size (%lluB)\n",
+ pcc_chan->shmem_size);
+ }
+
+ uncore->pchan = pcc_chan;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_free_pcc_chan, uncore);
+}
+
+static acpi_status hisi_uncore_pcc_reg_scan(struct acpi_resource *res,
+ void *ctx)
+{
+ struct acpi_resource_generic_register *reg;
+ struct hisi_uncore_freq *uncore;
+
+ if (!res || res->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ return AE_OK;
+
+ reg = &res->data.generic_reg;
+ if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM)
+ return AE_OK;
+
+ if (!ctx)
+ return AE_ERROR;
+
+ uncore = ctx;
+ /* PCC subspace ID stored in Access Size */
+ uncore->chan_id = reg->access_size;
+
+ return AE_CTRL_TERMINATE;
+}
+
+static int hisi_uncore_init_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ acpi_handle handle = ACPI_HANDLE(uncore->dev);
+ acpi_status status;
+ int rc;
+
+ uncore->chan_id = -1;
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ hisi_uncore_pcc_reg_scan, uncore);
+ if (ACPI_FAILURE(status) || uncore->chan_id < 0)
+ return dev_err_probe(uncore->dev, -ENODEV,
+ "Failed to get a PCC channel\n");
+
+ rc = devm_mutex_init(uncore->dev, &uncore->pcc_lock);
+ if (rc)
+ return rc;
+
+ return hisi_uncore_request_pcc_chan(uncore);
+}
+
+static int hisi_uncore_cmd_send(struct hisi_uncore_freq *uncore,
+ u8 cmd, u32 *data)
+{
+ struct hisi_uncore_pcc_shmem __iomem *addr;
+ struct hisi_uncore_pcc_shmem shmem;
+ struct pcc_mbox_chan *pchan;
+ unsigned int mrtt;
+ s64 time_delta;
+ u16 status;
+ int rc;
+
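+	/* Serialize PCC transactions: write command, ring doorbell, poll status */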
+ guard(mutex)(&uncore->pcc_lock);
+
+ pchan = uncore->pchan;
+ if (!pchan)
+ return -ENODEV;
+
+ addr = (struct hisi_uncore_pcc_shmem __iomem *)pchan->shmem;
+ if (!addr)
+ return -EINVAL;
+
+ /* Handle the Minimum Request Turnaround Time (MRTT) */
+ mrtt = pchan->min_turnaround_time;
+ time_delta = ktime_us_delta(ktime_get(), uncore->last_cmd_cmpl_time);
+ if (mrtt > time_delta)
+ udelay(mrtt - time_delta);
+
+ /* Copy data */
+ shmem.head = (struct acpi_pcct_shared_memory) {
+ .signature = PCC_SIGNATURE | uncore->chan_id,
+ .command = cmd,
+ };
+ shmem.pcc_data.data = *data;
+ memcpy_toio(addr, &shmem, sizeof(shmem));
+
+ /* Ring doorbell */
+ rc = mbox_send_message(pchan->mchan, &cmd);
+ if (rc < 0) {
+ dev_err(uncore->dev, "Failed to send mbox message, %d\n", rc);
+ return rc;
+ }
+
+ /* Wait status */
+ rc = readw_poll_timeout(&addr->head.status, status,
+ status & (PCC_STATUS_CMD_COMPLETE |
+ PCC_STATUS_ERROR),
+ HUCF_PCC_POLL_INTERVAL_US,
+ pchan->latency * HUCF_PCC_POLL_TIMEOUT_NUM);
+ if (rc) {
+ dev_err(uncore->dev, "PCC channel response timeout, cmd=%u\n", cmd);
+ } else if (status & PCC_STATUS_ERROR) {
+ dev_err(uncore->dev, "PCC cmd error, cmd=%u\n", cmd);
+ rc = -EIO;
+ }
+
+ uncore->last_cmd_cmpl_time = ktime_get();
+
+ /* Copy data back */
+ memcpy_fromio(data, &addr->pcc_data.data, sizeof(*data));
+
+ /* Clear mailbox active req */
+ mbox_client_txdone(pchan->mchan, rc);
+
+ return rc;
+}
+
+static int hisi_uncore_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+		dev_err(dev, "Failed to get OPP for freq %lu Hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+
+ data = (u32)(dev_pm_opp_get_freq(opp) / HZ_PER_MHZ);
+
+ dev_pm_opp_put(opp);
+
+ return hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_FREQ, &data);
+}
+
+static int hisi_uncore_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ /* Not used */
+ return 0;
+}
+
+static int hisi_uncore_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ u32 data = 0;
+ int rc;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_FREQ, &data);
+
+ /*
+ * Upon a failure, 'data' remains 0 and 'freq' is set to 0 rather than a
+ * random value. devfreq shouldn't use 'freq' in that case though.
+ */
+ *freq = data * HZ_PER_MHZ;
+
+ return rc;
+}
+
+static void devm_hisi_uncore_remove_opp(void *data)
+{
+ struct hisi_uncore_freq *uncore = data;
+
+ dev_pm_opp_remove_all_dynamic(uncore->dev);
+}
+
+static int hisi_uncore_init_opp(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ unsigned long freq_mhz;
+ u32 num, index;
+ u32 data = 0;
+ int rc;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat freq num\n");
+
+ num = data;
+
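+	/*
+	 * Query each platform-supported frequency by index and register it
+	 * as a dynamic OPP.
+	 */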
+ for (index = 0; index < num; index++) {
+ data = index;
+ rc = hisi_uncore_cmd_send(uncore,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ &data);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Failed to get plat freq at index %u\n", index);
+ }
+ freq_mhz = data;
+
+		/* The OPP voltage is unused here; register 1 V as a placeholder */
+ rc = dev_pm_opp_add(dev, freq_mhz * HZ_PER_MHZ, 1000000);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Add OPP %lu failed\n", freq_mhz);
+ }
+ }
+
+ return devm_add_action_or_reset(dev, devm_hisi_uncore_remove_opp,
+ uncore);
+}
+
+static int hisi_platform_gov_func(struct devfreq *df, unsigned long *freq)
+{
+ /*
+	 * In platform-controlled mode the frequency requested by devfreq is
+	 * ignored, so simply report the maximum frequency.
+ */
+ *freq = DEVFREQ_MAX_FREQ;
+
+ return 0;
+}
+
+static int hisi_platform_gov_handler(struct devfreq *df, unsigned int event,
+ void *val)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(df->dev.parent);
+ int rc = 0;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ data = HUCF_MODE_PLATFORM;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode (%d)\n", rc);
+ break;
+ case DEVFREQ_GOV_STOP:
+ data = HUCF_MODE_OS;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set os mode (%d)\n", rc);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * In platform-controlled mode, the platform decides the uncore frequency and
+ * ignores any frequency requested by the driver. Thus, provide a pseudo
+ * 'hisi_platform' governor that keeps the devfreq monitor idle and avoids
+ * pointless polling overhead.
+ */
+static struct devfreq_governor hisi_platform_governor = {
+ .name = "hisi_platform",
+ /*
+	 * Set the IRQ_DRIVEN flag to bypass the devfreq monitor mechanism,
+	 * even though this governor is not actually interrupt-driven.
+ */
+ .flags = DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
+ .get_target_freq = hisi_platform_gov_func,
+ .event_handler = hisi_platform_gov_handler,
+};
+
+static void hisi_uncore_remove_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ u32 data = HUCF_MODE_PLATFORM;
+ int rc;
+
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (--hisi_platform_gov_usage == 0) {
+ rc = devfreq_remove_governor(&hisi_platform_governor);
+ if (rc)
+ dev_err(uncore->dev, "Failed to remove hisi_platform gov (%d)\n", rc);
+ }
+
+ /*
+	 * Switch back to platform-controlled mode on exit so the hardware has
+	 * a well-defined behaviour once the driver is detached.
+ */
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode on exit (%d)\n", rc);
+}
+
+static void devm_hisi_uncore_remove_platform_gov(void *data)
+{
+ hisi_uncore_remove_platform_gov(data);
+}
+
+static int hisi_uncore_add_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return 0;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (hisi_platform_gov_usage == 0) {
+ int rc = devfreq_add_governor(&hisi_platform_governor);
+ if (rc)
+ return rc;
+ }
+ hisi_platform_gov_usage++;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_remove_platform_gov,
+ uncore);
+}
+
+/*
+ * Returns:
+ * 0 on success, with uncore->related_cpus populated.
+ * -EINVAL if the property is not found or contains no elements, or if any
+ *	    subroutine is passed invalid arguments.
+ * Other negative error codes on other failures.
+ */
+static int hisi_uncore_mark_related_cpus(struct hisi_uncore_freq *uncore,
+ char *property, int (*get_topo_id)(int cpu),
+ const struct cpumask *(*get_cpumask)(int cpu))
+{
+ unsigned int i, cpu;
+ size_t len;
+ int rc;
+
+ rc = device_property_count_u32(uncore->dev, property);
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return -EINVAL;
+
+ len = rc;
+ u32 *num __free(kfree) = kcalloc(len, sizeof(*num), GFP_KERNEL);
+ if (!num)
+ return -ENOMEM;
+
+ rc = device_property_read_u32_array(uncore->dev, property, num, len);
+ if (rc)
+ return rc;
+
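+	/*
+	 * For each listed topology ID, add the cpumask of the matching
+	 * package/cluster to the set of related CPUs.
+	 */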
+ for (i = 0; i < len; i++) {
+ for_each_possible_cpu(cpu) {
+ if (get_topo_id(cpu) != num[i])
+ continue;
+
+ cpumask_or(&uncore->related_cpus,
+ &uncore->related_cpus, get_cpumask(cpu));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int get_package_id(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static const struct cpumask *get_package_cpumask(int cpu)
+{
+ return topology_core_cpumask(cpu);
+}
+
+static int get_cluster_id(int cpu)
+{
+ return topology_cluster_id(cpu);
+}
+
+static const struct cpumask *get_cluster_cpumask(int cpu)
+{
+ return topology_cluster_cpumask(cpu);
+}
+
+static int hisi_uncore_mark_related_cpus_wrap(struct hisi_uncore_freq *uncore)
+{
+ int rc;
+
+ cpumask_clear(&uncore->related_cpus);
+
+ rc = hisi_uncore_mark_related_cpus(uncore, "related-package",
+ get_package_id,
+ get_package_cpumask);
+	/* Done on success; any error other than -EINVAL likely means broken firmware */
+ if (!rc || rc != -EINVAL)
+ return rc;
+
+ /* Try another property name if rc == -EINVAL */
+ return hisi_uncore_mark_related_cpus(uncore, "related-cluster",
+ get_cluster_id,
+ get_cluster_cpumask);
+}
+
+static ssize_t related_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev->parent);
+
+ return cpumap_print_to_pagebuf(true, buf, &uncore->related_cpus);
+}
+
+static DEVICE_ATTR_RO(related_cpus);
+
+static struct attribute *hisi_uncore_freq_attrs[] = {
+ &dev_attr_related_cpus.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(hisi_uncore_freq);
+
+static int hisi_uncore_devfreq_register(struct hisi_uncore_freq *uncore)
+{
+ struct devfreq_dev_profile *profile;
+ struct device *dev = uncore->dev;
+ unsigned long freq;
+ u32 data;
+ int rc;
+
+ rc = hisi_uncore_get_cur_freq(dev, &freq);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat init freq\n");
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ *profile = (struct devfreq_dev_profile) {
+ .initial_freq = freq,
+ .polling_ms = HUCF_DEFAULT_POLLING_MS,
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .target = hisi_uncore_target,
+ .get_dev_status = hisi_uncore_get_dev_status,
+ .get_cur_freq = hisi_uncore_get_cur_freq,
+ .dev_groups = hisi_uncore_freq_groups,
+ };
+
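+	/*
+	 * Pick the initial governor based on the current control mode:
+	 * 'hisi_platform' if the platform owns the frequency, otherwise
+	 * the performance governor.
+	 */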
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_MODE, &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get operate mode\n");
+
+ if (data == HUCF_MODE_PLATFORM)
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ hisi_platform_governor.name, NULL);
+ else
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ DEVFREQ_GOV_PERFORMANCE, NULL);
+ if (IS_ERR(uncore->devfreq))
+ return dev_err_probe(dev, PTR_ERR(uncore->devfreq),
+ "Failed to add devfreq device\n");
+
+ return 0;
+}
+
+static int hisi_uncore_freq_probe(struct platform_device *pdev)
+{
+ struct hisi_uncore_freq *uncore;
+ struct device *dev = &pdev->dev;
+ u32 cap;
+ int rc;
+
+ uncore = devm_kzalloc(dev, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->dev = dev;
+ platform_set_drvdata(pdev, uncore);
+
+ rc = hisi_uncore_init_pcc_chan(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init PCC channel\n");
+
+ rc = hisi_uncore_init_opp(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init OPP\n");
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_CAP, &cap);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get capability\n");
+
+ uncore->cap = cap;
+
+ rc = hisi_uncore_add_platform_gov(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to add hisi_platform governor\n");
+
+ rc = hisi_uncore_mark_related_cpus_wrap(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to mark related cpus\n");
+
+ rc = hisi_uncore_devfreq_register(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to register devfreq\n");
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_uncore_freq_acpi_match[] = {
+ { "HISI04F1", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_uncore_freq_acpi_match);
+
+static struct platform_driver hisi_uncore_freq_drv = {
+ .probe = hisi_uncore_freq_probe,
+ .driver = {
+ .name = "hisi_uncore_freq",
+ .acpi_match_table = hisi_uncore_freq_acpi_match,
+ },
+};
+module_platform_driver(hisi_uncore_freq_drv);
+
+MODULE_DESCRIPTION("HiSilicon uncore frequency scaling driver");
+MODULE_AUTHOR("Jie Zhan <zhanjie9@hisilicon.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
index a727067980fb..49798f542d68 100644
--- a/drivers/devfreq/imx-bus.c
+++ b/drivers/devfreq/imx-bus.c
@@ -7,7 +7,7 @@
#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -59,7 +59,7 @@ static int imx_bus_init_icc(struct device *dev)
struct imx_bus *priv = dev_get_drvdata(dev);
const char *icc_driver_name;
- if (!of_get_property(dev->of_node, "#interconnect-cells", NULL))
+ if (!of_property_present(dev->of_node, "#interconnect-cells"))
return 0;
if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) {
dev_warn(dev, "imx interconnect drivers disabled\n");
diff --git a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c
index 16636973eb10..e1348490c8aa 100644
--- a/drivers/devfreq/imx8m-ddrc.c
+++ b/drivers/devfreq/imx8m-ddrc.c
@@ -3,9 +3,9 @@
* Copyright 2019 NXP
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/devfreq.h>
#include <linux/pm_opp.h>
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index e5458ada5197..4c22be728f6a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -8,7 +8,6 @@
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
@@ -87,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_voltage > new_voltage) {
+ } else {
voltage = max(new_voltage,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(drv->proc_reg, voltage,
@@ -127,7 +126,7 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct mtk_ccifreq_drv *drv = dev_get_drvdata(dev);
- struct clk *cci_pll = clk_get_parent(drv->cci_clk);
+ struct clk *cci_pll;
struct dev_pm_opp *opp;
unsigned long opp_rate;
int voltage, pre_voltage, inter_voltage, target_voltage, ret;
@@ -138,17 +137,19 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq,
if (drv->pre_freq == *freq)
return 0;
+ mutex_lock(&drv->reg_lock);
+
inter_voltage = drv->inter_voltage;
+ cci_pll = clk_get_parent(drv->cci_clk);
opp_rate = *freq;
opp = devfreq_recommended_opp(dev, &opp_rate, 1);
if (IS_ERR(opp)) {
dev_err(dev, "failed to find opp for freq: %ld\n", opp_rate);
- return PTR_ERR(opp);
+ ret = PTR_ERR(opp);
+ goto out_unlock;
}
- mutex_lock(&drv->reg_lock);
-
voltage = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
@@ -227,9 +228,9 @@ static int mtk_ccifreq_opp_notifier(struct notifier_block *nb,
drv = container_of(nb, struct mtk_ccifreq_drv, opp_nb);
if (event == OPP_EVENT_ADJUST_VOLTAGE) {
+ mutex_lock(&drv->reg_lock);
freq = dev_pm_opp_get_freq(opp);
- mutex_lock(&drv->reg_lock);
/* current opp item is changed */
if (freq == drv->pre_freq) {
volt = dev_pm_opp_get_voltage(opp);
@@ -385,13 +386,14 @@ out_disable_cci_clk:
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
}
-static int mtk_ccifreq_remove(struct platform_device *pdev)
+static void mtk_ccifreq_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_ccifreq_drv *drv;
@@ -404,8 +406,6 @@ static int mtk_ccifreq_remove(struct platform_device *pdev)
regulator_disable(drv->proc_reg);
if (drv->sram_reg)
regulator_disable(drv->sram_reg);
-
- return 0;
}
static const struct mtk_ccifreq_platform_data mt8183_platform_data = {
@@ -431,7 +431,7 @@ MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines);
static struct platform_driver mtk_ccifreq_platdrv = {
.probe = mtk_ccifreq_probe,
- .remove = mtk_ccifreq_remove,
+ .remove = mtk_ccifreq_remove,
.driver = {
.name = "mtk-ccifreq",
.of_match_table = mtk_ccifreq_machines,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index daff40702615..dbdce7636ca5 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -22,6 +22,7 @@
#include <linux/suspend.h>
#include <soc/rockchip/pm_domains.h>
+#include <soc/rockchip/rockchip_grf.h>
#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rockchip_sip.h>
@@ -381,17 +382,16 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
}
regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
- ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
- RK3399_PMUGRF_DDRTYPE_MASK;
+ ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);
switch (ddr_type) {
- case RK3399_PMUGRF_DDRTYPE_DDR3:
+ case ROCKCHIP_DDRTYPE_DDR3:
data->odt_dis_freq = data->ddr3_odt_dis_freq;
break;
- case RK3399_PMUGRF_DDRTYPE_LPDDR3:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
data->odt_dis_freq = data->lpddr3_odt_dis_freq;
break;
- case RK3399_PMUGRF_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4:
data->odt_dis_freq = data->lpddr4_odt_dis_freq;
break;
default:
@@ -459,13 +459,11 @@ err_edev:
return ret;
}
-static int rk3399_dmcfreq_remove(struct platform_device *pdev)
+static void rk3399_dmcfreq_remove(struct platform_device *pdev)
{
struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
devfreq_event_disable_edev(dmcfreq->edev);
-
- return 0;
}
static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
index 13d32213139f..4bd5657558d6 100644
--- a/drivers/devfreq/sun8i-a33-mbus.c
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -360,7 +360,7 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
if (IS_ERR(priv->reg_mbus))
return PTR_ERR(priv->reg_mbus);
- priv->clk_bus = devm_clk_get(dev, "bus");
+ priv->clk_bus = devm_clk_get_enabled(dev, "bus");
if (IS_ERR(priv->clk_bus))
return dev_err_probe(dev, PTR_ERR(priv->clk_bus),
"failed to get bus clock\n");
@@ -375,24 +375,15 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk_mbus),
"failed to get mbus clock\n");
- ret = clk_prepare_enable(priv->clk_bus);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to enable bus clock\n");
-
/* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */
- ret = clk_rate_exclusive_get(priv->clk_dram);
- if (ret) {
- err = "failed to lock dram clock rate\n";
- goto err_disable_bus;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_dram);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock dram clock rate\n");
/* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. */
- ret = clk_rate_exclusive_get(priv->clk_mbus);
- if (ret) {
- err = "failed to lock mbus clock rate\n";
- goto err_unlock_dram;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_mbus);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock mbus clock rate\n");
priv->gov_data.upthreshold = 10;
priv->gov_data.downdifferential = 5;
@@ -405,10 +396,8 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
priv->profile.max_state = max_state;
ret = devm_pm_opp_set_clkname(dev, "dram");
- if (ret) {
- err = "failed to add OPP table\n";
- goto err_unlock_mbus;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
base_freq = clk_get_rate(clk_get_parent(priv->clk_dram));
for (i = 0; i < max_state; ++i) {
@@ -448,17 +437,11 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
err_remove_opps:
dev_pm_opp_remove_all_dynamic(dev);
-err_unlock_mbus:
- clk_rate_exclusive_put(priv->clk_mbus);
-err_unlock_dram:
- clk_rate_exclusive_put(priv->clk_dram);
-err_disable_bus:
- clk_disable_unprepare(priv->clk_bus);
return dev_err_probe(dev, ret, err);
}
-static int sun8i_a33_mbus_remove(struct platform_device *pdev)
+static void sun8i_a33_mbus_remove(struct platform_device *pdev)
{
struct sun8i_a33_mbus *priv = platform_get_drvdata(pdev);
unsigned long initial_freq = priv->profile.initial_freq;
@@ -472,11 +455,6 @@ static int sun8i_a33_mbus_remove(struct platform_device *pdev)
dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret);
dev_pm_opp_remove_all_dynamic(dev);
- clk_rate_exclusive_put(priv->clk_mbus);
- clk_rate_exclusive_put(priv->clk_dram);
- clk_disable_unprepare(priv->clk_bus);
-
- return 0;
}
static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
@@ -497,7 +475,7 @@ static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
static struct platform_driver sun8i_a33_mbus_driver = {
.probe = sun8i_a33_mbus_probe,
- .remove = sun8i_a33_mbus_remove,
+ .remove = sun8i_a33_mbus_remove,
.driver = {
.name = "sun8i-a33-mbus",
.of_match_table = sun8i_a33_mbus_of_match,
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 503376b894b6..8b57194ac698 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -9,11 +9,13 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/minmax.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
@@ -21,8 +23,6 @@
#include <soc/tegra/fuse.h>
-#include "governor.h"
-
#define ACTMON_GLB_STATUS 0x0
#define ACTMON_GLB_PERIOD_CTRL 0x4
@@ -326,14 +326,9 @@ static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
unsigned int i;
const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
- for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
- if (cpu_freq >= ratio->cpu_freq) {
- if (ratio->emc_freq >= tegra->max_freq)
- return tegra->max_freq;
- else
- return ratio->emc_freq;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++)
+ if (cpu_freq >= ratio->cpu_freq)
+ return min(ratio->emc_freq, tegra->max_freq);
return 0;
}