Diffstat (limited to 'drivers/base')
 drivers/base/arch_numa.c             |   2
 drivers/base/arch_topology.c         |  56
 drivers/base/power/main.c            | 148
 drivers/base/regmap/internal.h       |   1
 drivers/base/regmap/regmap-debugfs.c |   8
 drivers/base/regmap/regmap-kunit.c   |  60
 drivers/base/regmap/regmap-ram.c     |   4
 drivers/base/regmap/regmap-raw-ram.c |  31
 drivers/base/regmap/regmap.c         |   2
 9 files changed, 197 insertions(+), 115 deletions(-)
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index eaa31e567d1e..5b59d133b6af 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
-static int __init early_cpu_to_node(int cpu)
+int __init early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
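Dropping 'static' only changes linkage; callers outside arch_numa.c still need a matching declaration. The header change is not part of this diff, so the location below is an assumption:

	int early_cpu_to_node(int cpu);	/* assumed to land in an arch header such as <asm/numa.h> */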
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index b741b5ba82bd..5aaa0865625d 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#include <linux/units.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>
@@ -26,7 +27,8 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-static DEFINE_PER_CPU(u32, freq_factor) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
@@ -170,9 +172,9 @@ DEFINE_PER_CPU(unsigned long, thermal_pressure);
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
* thermal capping. It might also be a boost frequency value, which is bigger
- * than the internal 'freq_factor' max frequency. In such case the pressure
- * value should simply be removed, since this is an indication that there is
- * no thermal throttling. The @capped_freq must be provided in kHz.
+ * than the internal 'capacity_freq_ref' max frequency. In that case the
+ * pressure value should simply be removed, since this indicates that
+ * there is no thermal throttling. The @capped_freq must be provided in kHz.
*/
void topology_update_thermal_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
@@ -183,10 +185,7 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
cpu = cpumask_first(cpus);
max_capacity = arch_scale_cpu_capacity(cpu);
- max_freq = per_cpu(freq_factor, cpu);
-
- /* Convert to MHz scale which is used in 'freq_factor' */
- capped_freq /= 1000;
+ max_freq = arch_scale_freq_ref(cpu);
/*
* Handle properly the boost frequencies, which should simply clean
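With capacity_freq_ref held in kHz, @capped_freq can now be compared against max_freq directly and the old MHz conversion disappears. A minimal sketch of the pressure arithmetic, reconstructed from the surrounding context rather than quoted verbatim:

	/* Both frequencies in kHz; a capped_freq at or above max_freq is a
	 * boost value, i.e. no throttling is in effect. */
	if (capped_freq >= max_freq)
		pressure = 0;
	else
		pressure = max_capacity -
			   mult_frac(max_capacity, capped_freq, max_freq);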
@@ -279,13 +278,13 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
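A worked example of the normalization above, with hypothetical values:

	/* CPU0: raw_capacity = 1024, capacity_freq_ref = 2400000 kHz
	 * CPU1: raw_capacity =  512, capacity_freq_ref = 1800000 kHz
	 *
	 * products:  CPU0 -> 2457600000  (becomes capacity_scale)
	 *            CPU1 ->  921600000
	 *
	 * scaled:    CPU0 -> (2457600000 << 10) / 2457600000 = 1024
	 *            CPU1 -> ( 921600000 << 10) / 2457600000 =  384 */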
@@ -321,15 +320,15 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
cpu_node, raw_capacity[cpu]);
/*
- * Update freq_factor for calculating early boot cpu capacities.
+ * Update capacity_freq_ref for calculating early boot CPU capacities.
* For non-clk CPU DVFS mechanisms, there's no way to get the
* frequency value now, assuming they are running at the same
- * frequency (by keeping the initial freq_factor value).
+ * frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
if (!PTR_ERR_OR_ZERO(cpu_clk)) {
- per_cpu(freq_factor, cpu) =
- clk_get_rate(cpu_clk) / 1000;
+ per_cpu(capacity_freq_ref, cpu) =
+ clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
}
} else {
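HZ_PER_KHZ comes from <linux/units.h> (hence the new include above) and equals 1000, so clk_get_rate()'s Hz result is stored as kHz. A quick unit check with a hypothetical 1.8 GHz CPU clock:

	unsigned long rate_hz  = 1800000000UL;		/* hypothetical clk_get_rate() result */
	unsigned long rate_khz = rate_hz / HZ_PER_KHZ;	/* 1800000, same unit as cpuinfo.max_freq */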
@@ -345,11 +344,16 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return !ret;
}
+void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
+{
+}
+
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
void topology_init_cpu_capacity_cppc(void)
{
+ u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
int cpu;
@@ -366,6 +370,10 @@ void topology_init_cpu_capacity_cppc(void)
(perf_caps.highest_perf >= perf_caps.nominal_perf) &&
(perf_caps.highest_perf >= perf_caps.lowest_perf)) {
raw_capacity[cpu] = perf_caps.highest_perf;
+ capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);
+
+ per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
+
pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
cpu, raw_capacity[cpu]);
continue;
@@ -376,7 +384,18 @@ void topology_init_cpu_capacity_cppc(void)
goto exit;
}
- topology_normalize_cpu_scale();
+ for_each_possible_cpu(cpu) {
+ freq_inv_set_max_ratio(cpu,
+ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+
+ capacity = raw_capacity[cpu];
+ capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+ capacity_scale);
+ topology_set_cpu_scale(cpu, capacity);
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+ cpu, topology_get_cpu_scale(cpu));
+ }
+
schedule_work(&update_topology_flags_work);
pr_debug("cpu_capacity: cpu_capacity initialization done\n");
@@ -410,8 +429,11 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus)
- per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
+ for_each_cpu(cpu, policy->related_cpus) {
+ per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
+ freq_inv_set_max_ratio(cpu,
+ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+ }
if (cpumask_empty(cpus_to_visit)) {
topology_normalize_cpu_scale();
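Unit bookkeeping for the hunk above: cpuinfo.max_freq is already in kHz, which is exactly what capacity_freq_ref now stores, so the old divide-by-1000 (which produced MHz for freq_factor) is gone:

	/* old: per_cpu(freq_factor, cpu)       = policy->cpuinfo.max_freq / 1000;  MHz
	 * new: per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;         kHz
	 *      freq_inv_set_max_ratio(cpu, capacity_freq_ref * HZ_PER_KHZ);        Hz */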
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f85f3515c258..9c5a5f4dba5a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
}
/**
- * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -655,7 +655,13 @@ Skip:
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
- return error;
+
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+ }
}
static bool is_async(struct device *dev)
@@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(func, dev);
+ if (!is_async(dev))
+ return false;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
return true;
- }
+
+ put_device(dev);
return false;
}
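The reworked helper assumes async_schedule_dev_nocall() returns false, rather than running the callback synchronously, when it cannot queue the work; the extra reference is then dropped and the caller falls back to a synchronous resume. A sketch of the resulting call pattern, mirroring the new wrappers below with explanatory comments added:

	static void device_resume_sketch(struct device *dev)
	{
		/* Queued: the async callback owns the reference taken by
		 * dpm_async_fn() and does the put_device() itself. */
		if (dpm_async_fn(dev, async_resume))
			return;

		/* Not async-capable, or queueing failed (reference already
		 * dropped): resume synchronously on this thread. */
		__device_resume(dev, pm_transition, false);
	}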
@@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = device_resume_noirq(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume_noirq(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_noirq))
+ return;
+
+ __device_resume_noirq(dev, pm_transition, false);
+}
+
static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
- /*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
- */
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
- dpm_async_fn(dev, async_resume_noirq);
-
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
get_device(dev);
@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
-
- error = device_resume_noirq(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
- }
- }
+ device_resume_noirq(dev);
put_device(dev);
@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
}
/**
- * device_resume_early - Execute an "early resume" callback for given device.
+ * __device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -811,21 +807,31 @@ Out:
pm_runtime_enable(dev);
complete_all(&dev->power.completion);
- return error;
+
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async early" : " early", error);
+ }
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = device_resume_early(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume_early(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume_early(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_early))
+ return;
+
+ __device_resume_early(dev, pm_transition, false);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
- /*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
- */
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
- dpm_async_fn(dev, async_resume_early);
-
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
get_device(dev);
@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
-
- error = device_resume_early(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
- }
+ device_resume_early(dev);
put_device(dev);
@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
- * device_resume - Execute "resume" callbacks for given device.
+ * __device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void __device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
- return error;
+ if (error) {
+ suspend_stats.failed_resume++;
+ dpm_save_failed_step(SUSPEND_RESUME);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
}
static void async_resume(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
- error = device_resume(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume))
+ return;
+
+ __device_resume(dev, pm_transition, false);
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
- dpm_async_fn(dev, async_resume);
-
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
+
get_device(dev);
- if (!is_async(dev)) {
- int error;
- mutex_unlock(&dpm_list_mtx);
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume(dev);
- error = device_resume(dev, state, false);
- if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, "", error);
- }
+ mutex_lock(&dpm_list_mtx);
- mutex_lock(&dpm_list_mtx);
- }
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 9a9ea514c2d8..583dd5d7d46b 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -318,6 +318,7 @@ struct regmap_ram_data {
bool *read;
bool *written;
enum regmap_endian reg_endian;
+ bool (*noinc_reg)(struct regmap_ram_data *data, unsigned int reg);
};
/*
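A usage sketch for the new callback, mirroring the kunit test added below — the test backend consults noinc_reg, when set, to decide whether a register should be modelled as non-incrementing:

	static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
	{
		return reg == 0;	/* model register 0 as a noinc register */
	}

	/* ... after creating the map, e.g. via gen_raw_regmap(): */
	data->noinc_reg = ram_reg_zero;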
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index bdd80b73c3e6..fb84cda92a75 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -226,8 +226,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << MAX_ORDER))
- count = PAGE_SIZE << MAX_ORDER;
+ if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
+ count = PAGE_SIZE << MAX_PAGE_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
@@ -373,8 +373,8 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << MAX_ORDER))
- count = PAGE_SIZE << MAX_ORDER;
+ if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
+ count = PAGE_SIZE << MAX_PAGE_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
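MAX_ORDER was renamed treewide to MAX_PAGE_ORDER; the value still bounds the largest buddy allocation, so the cap keeps the kmalloc() below within what the page allocator can ever satisfy. A worked size check, assuming common defaults of 4 KiB pages and MAX_PAGE_ORDER == 10:

	/* PAGE_SIZE << MAX_PAGE_ORDER == 4096 << 10 == 4 MiB */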
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index e14cc03a17f6..026bdcb45127 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -1186,6 +1186,65 @@ static void raw_write(struct kunit *test)
regmap_exit(map);
}
+static bool reg_zero(struct device *dev, unsigned int reg)
+{
+ return reg == 0;
+}
+
+static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
+{
+ return reg == 0;
+}
+
+static void raw_noinc_write(struct kunit *test)
+{
+ struct raw_test_types *t = (struct raw_test_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val, val_test, val_last;
+ u16 val_array[BLOCK_TEST_SIZE];
+
+ config = raw_regmap_config;
+ config.volatile_reg = reg_zero;
+ config.writeable_noinc_reg = reg_zero;
+ config.readable_noinc_reg = reg_zero;
+
+ map = gen_raw_regmap(&config, t, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ data->noinc_reg = ram_reg_zero;
+
+ get_random_bytes(&val_array, sizeof(val_array));
+
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
+ val_test = be16_to_cpu(val_array[1]) + 100;
+ val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
+ } else {
+ val_test = le16_to_cpu(val_array[1]) + 100;
+ val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
+ }
+
+ /* Put some data into the register following the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
+
+ /* Write some data to the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
+ sizeof(val_array)));
+
+ /* We should read back the last value written */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
+ KUNIT_ASSERT_EQ(test, val_last, val);
+
+ /* Make sure we didn't touch the register after the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+ KUNIT_ASSERT_EQ(test, val_test, val);
+
+ regmap_exit(map);
+}
+
static void raw_sync(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
@@ -1284,6 +1343,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
+ KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
{}
};
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 85f34a5dee04..192d6b131dff 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -65,12 +65,12 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
return ERR_PTR(-EINVAL);
}
- data->read = kcalloc(sizeof(bool), config->max_register + 1,
+ data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
- data->written = kcalloc(sizeof(bool), config->max_register + 1,
+ data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
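The two calls above had kcalloc()'s first two arguments transposed. The byte count is the same either way, but the declared order is element count first, which is what the allocator's overflow checking and static checkers key on:

	void *kcalloc(size_t n, size_t size, gfp_t flags);	/* n elements of 'size' bytes each */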
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index c9b800885f3b..93ae07b503fd 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -41,10 +41,15 @@ static int regmap_raw_ram_gather_write(void *context,
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
- memcpy(&our_buf[r], val, val_len);
-
- for (i = 0; i < val_len / 2; i++)
- data->written[r + i] = true;
+ if (data->noinc_reg && data->noinc_reg(data, r)) {
+ memcpy(&our_buf[r], val + val_len - 2, 2);
+ data->written[r] = true;
+ } else {
+ memcpy(&our_buf[r], val, val_len);
+
+ for (i = 0; i < val_len / 2; i++)
+ data->written[r + i] = true;
+ }
return 0;
}
@@ -70,10 +75,16 @@ static int regmap_raw_ram_read(void *context,
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
- memcpy(val, &our_buf[r], val_len);
-
- for (i = 0; i < val_len / 2; i++)
- data->read[r + i] = true;
+ if (data->noinc_reg && data->noinc_reg(data, r)) {
+ for (i = 0; i < val_len; i += 2)
+ memcpy(val + i, &our_buf[r], 2);
+ data->read[r] = true;
+ } else {
+ memcpy(val, &our_buf[r], val_len);
+
+ for (i = 0; i < val_len / 2; i++)
+ data->read[r + i] = true;
+ }
return 0;
}
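Taken together, the two hunks above make the test backend model a FIFO-style register: a noinc write streams every element to the same address so only the last one survives, and a noinc read replays that single stored value for the whole buffer. Concretely, for the 16-bit noinc register at address 0 exercised by the raw_noinc_write kunit test:

	/* write val_array[0..15]  ->  our_buf[0] == val_array[15]  (last element wins)
	 * read  32 bytes          ->  val_array[15] repeated 16 times
	 * register 1 (after the noinc register) stays untouched */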
@@ -111,12 +122,12 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
return ERR_PTR(-EINVAL);
}
- data->read = kcalloc(sizeof(bool), config->max_register + 1,
+ data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
- data->written = kcalloc(sizeof(bool), config->max_register + 1,
+ data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ea6157747199..6db77d8e45f9 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -2136,7 +2136,7 @@ static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
}
/**
- * regmap_noinc_write(): Write data from a register without incrementing the
+ * regmap_noinc_write(): Write data to a register without incrementing the
* register number
*
* @map: Register map to write to