Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig  1
-rw-r--r--  drivers/base/Makefile  3
-rw-r--r--  drivers/base/arch_numa.c  230
-rw-r--r--  drivers/base/arch_topology.c  177
-rw-r--r--  drivers/base/attribute_container.c  48
-rw-r--r--  drivers/base/auxiliary.c  42
-rw-r--r--  drivers/base/auxiliary_sysfs.c  113
-rw-r--r--  drivers/base/base.h  28
-rw-r--r--  drivers/base/bus.c  37
-rw-r--r--  drivers/base/cacheinfo.c  120
-rw-r--r--  drivers/base/class.c  60
-rw-r--r--  drivers/base/component.c  4
-rw-r--r--  drivers/base/core.c  623
-rw-r--r--  drivers/base/cpu.c  32
-rw-r--r--  drivers/base/dd.c  65
-rw-r--r--  drivers/base/devcoredump.c  68
-rw-r--r--  drivers/base/devres.c  59
-rw-r--r--  drivers/base/driver.c  21
-rw-r--r--  drivers/base/faux.c  232
-rw-r--r--  drivers/base/firmware_loader/Kconfig  7
-rw-r--r--  drivers/base/firmware_loader/builtin/main.c  2
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c  8
-rw-r--r--  drivers/base/firmware_loader/main.c  143
-rw-r--r--  drivers/base/firmware_loader/sysfs.c  14
-rw-r--r--  drivers/base/firmware_loader/sysfs.h  2
-rw-r--r--  drivers/base/init.c  1
-rw-r--r--  drivers/base/isa.c  2
-rw-r--r--  drivers/base/memory.c  29
-rw-r--r--  drivers/base/module.c  50
-rw-r--r--  drivers/base/node.c  19
-rw-r--r--  drivers/base/physical_location.c  4
-rw-r--r--  drivers/base/platform-msi.c  355
-rw-r--r--  drivers/base/platform.c  28
-rw-r--r--  drivers/base/power/common.c  203
-rw-r--r--  drivers/base/power/main.c  317
-rw-r--r--  drivers/base/power/qos.c  1
-rw-r--r--  drivers/base/power/runtime.c  36
-rw-r--r--  drivers/base/power/sysfs.c  18
-rw-r--r--  drivers/base/power/wakeirq.c  30
-rw-r--r--  drivers/base/power/wakeup.c  11
-rw-r--r--  drivers/base/property.c  121
-rw-r--r--  drivers/base/regmap/internal.h  16
-rw-r--r--  drivers/base/regmap/regcache-flat.c  4
-rw-r--r--  drivers/base/regmap/regcache-maple.c  34
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  12
-rw-r--r--  drivers/base/regmap/regcache.c  16
-rw-r--r--  drivers/base/regmap/regmap-ac97.c  1
-rw-r--r--  drivers/base/regmap/regmap-i2c.c  4
-rw-r--r--  drivers/base/regmap/regmap-i3c.c  2
-rw-r--r--  drivers/base/regmap/regmap-irq.c  50
-rw-r--r--  drivers/base/regmap/regmap-kunit.c  1261
-rw-r--r--  drivers/base/regmap/regmap-mdio.c  2
-rw-r--r--  drivers/base/regmap/regmap-ram.c  6
-rw-r--r--  drivers/base/regmap/regmap-raw-ram.c  6
-rw-r--r--  drivers/base/regmap/regmap-sccb.c  1
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c  221
-rw-r--r--  drivers/base/regmap/regmap-sdw.c  2
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c  1
-rw-r--r--  drivers/base/regmap/regmap-spi-avmm.c  1
-rw-r--r--  drivers/base/regmap/regmap-spi.c  4
-rw-r--r--  drivers/base/regmap/regmap-spmi.c  1
-rw-r--r--  drivers/base/regmap/regmap-w1.c  1
-rw-r--r--  drivers/base/regmap/regmap.c  181
-rw-r--r--  drivers/base/regmap/trace.h  18
-rw-r--r--  drivers/base/swnode.c  14
-rw-r--r--  drivers/base/test/Kconfig  1
-rw-r--r--  drivers/base/test/platform-device-test.c  41
-rw-r--r--  drivers/base/topology.c  64
-rw-r--r--  drivers/base/trace.h  8
69 files changed, 3456 insertions, 1881 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 2b8fd6bb7da0..064eb52ff7e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -226,6 +226,7 @@ config GENERIC_ARCH_TOPOLOGY
config GENERIC_ARCH_NUMA
bool
+ select NUMA_MEMBLKS
help
Enable support for generic NUMA implementation. Currently, RISC-V
and ARM64 use it.
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 3079bfe53d04..8074a10183dc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- swnode.o
+ swnode.o faux.o
obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
@@ -16,6 +16,7 @@ obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_AUXILIARY_BUS) += auxiliary_sysfs.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 5b59d133b6af..c99f2ab105e5 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -12,16 +12,12 @@
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/numa_memblks.h>
#include <asm/sections.h>
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-nodemask_t numa_nodes_parsed __initdata;
static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
-static int numa_distance_cnt;
-static u8 *numa_distance;
bool numa_off;
static __init int numa_parse_early_param(char *opt)
@@ -30,6 +26,8 @@ static __init int numa_parse_early_param(char *opt)
return -EINVAL;
if (str_has_prefix(opt, "off"))
numa_off = true;
+ if (!strncmp(opt, "fake=", 5))
+ return numa_emu_cmdline(opt + 5);
return 0;
}
@@ -61,6 +59,7 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif
+#ifndef CONFIG_NUMA_EMU
static void numa_update_cpu(unsigned int cpu, bool remove)
{
int nid = cpu_to_node(cpu);
@@ -83,6 +82,7 @@ void numa_remove_cpu(unsigned int cpu)
{
numa_update_cpu(cpu, true);
}
+#endif
void numa_clear_node(unsigned int cpu)
{
@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
-int __init early_cpu_to_node(int cpu)
+int early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
@@ -189,174 +189,28 @@ void __init setup_per_cpu_areas(void)
}
#endif
-/**
- * numa_add_memblk() - Set node id to memblk
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- int ret;
-
- ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
- if (ret < 0) {
- pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
- start, (end - 1), nid);
- return ret;
- }
-
- node_set(nid, numa_nodes_parsed);
- return ret;
-}
-
/*
* Initialize NODE_DATA for a node on the local memory
*/
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
- u64 nd_pa;
- void *nd;
- int tnid;
-
if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, nid);
+ alloc_node_data(nid);
- nd = __va(nd_pa);
-
- /* report and initialize */
- pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
-/*
- * numa_free_distance
- *
- * The current table is freed.
- */
-void __init numa_free_distance(void)
-{
- size_t size;
-
- if (!numa_distance)
- return;
-
- size = numa_distance_cnt * numa_distance_cnt *
- sizeof(numa_distance[0]);
-
- memblock_free(numa_distance, size);
- numa_distance_cnt = 0;
- numa_distance = NULL;
-}
-
-/*
- * Create a new NUMA distance table.
- */
-static int __init numa_alloc_distance(void)
-{
- size_t size;
- int i, j;
-
- size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
- numa_distance = memblock_alloc(size, PAGE_SIZE);
- if (WARN_ON(!numa_distance))
- return -ENOMEM;
-
- numa_distance_cnt = nr_node_ids;
-
- /* fill with the default distances */
- for (i = 0; i < numa_distance_cnt; i++)
- for (j = 0; j < numa_distance_cnt; j++)
- numa_distance[i * numa_distance_cnt + j] = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
-
- pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
-
- return 0;
-}
-
-/**
- * numa_set_distance() - Set inter node NUMA distance from node to node.
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance.
- * If distance table doesn't exist, a warning is printed.
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * or @distance doesn't make sense, the call is ignored.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
- if (!numa_distance) {
- pr_warn_once("Warning: distance table not allocated yet\n");
- return;
- }
-
- if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
- from < 0 || to < 0) {
- pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- if ((u8)distance != distance ||
- (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-/*
- * Return NUMA distance @from to @to
- */
-int __node_distance(int from, int to)
-{
- if (from >= numa_distance_cnt || to >= numa_distance_cnt)
- return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
- return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
static int __init numa_register_nodes(void)
{
int nid;
- struct memblock_region *mblk;
-
- /* Check that valid nid is set to memblks */
- for_each_mem_region(mblk) {
- int mblk_nid = memblock_get_region_node(mblk);
- phys_addr_t start = mblk->base;
- phys_addr_t end = mblk->base + mblk->size - 1;
-
- if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
- mblk_nid, &start, &end);
- return -EINVAL;
- }
- }
+
+ /* Check the validity of the memblock/node mapping */
+ if (!memblock_validate_numa_coverage(0))
+ return -EINVAL;
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
@@ -381,11 +235,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
- ret = numa_alloc_distance();
- if (ret < 0)
- return ret;
-
- ret = init_func();
+ ret = numa_memblks_init(init_func, /* memblock_force_top_down */ false);
if (ret < 0)
goto out_free_distance;
@@ -403,7 +253,7 @@ static int __init numa_init(int (*init_func)(void))
return 0;
out_free_distance:
- numa_free_distance();
+ numa_reset_distance();
return ret;
}
@@ -433,6 +283,7 @@ static int __init dummy_numa_init(void)
pr_err("NUMA init failed\n");
return ret;
}
+ node_set(0, numa_nodes_parsed);
numa_off = true;
return 0;
@@ -445,7 +296,7 @@ static int __init arch_acpi_numa_init(void)
ret = acpi_numa_init();
if (ret) {
- pr_info("Failed to initialise from firmware\n");
+ pr_debug("Failed to initialise from firmware\n");
return ret;
}
@@ -475,3 +326,54 @@ void __init arch_numa_init(void)
numa_init(dummy_numa_init);
}
+
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids)
+{
+ int i, j;
+
+ /*
+ * Transform cpu_to_node_map table to use emulated nids by
+ * reverse-mapping phys_nid. The maps should always exist but fall
+ * back to zero just in case.
+ */
+ for (i = 0; i < ARRAY_SIZE(cpu_to_node_map); i++) {
+ if (cpu_to_node_map[i] == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < nr_emu_nids; j++)
+ if (cpu_to_node_map[i] == emu_nid_to_phys[j])
+ break;
+ cpu_to_node_map[i] = j < nr_emu_nids ? j : 0;
+ }
+}
+
+u64 __init numa_emu_dma_end(void)
+{
+ return memblock_start_of_DRAM() + SZ_4G;
+}
+
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
+{
+ struct cpumask *mask;
+
+ if (node == NUMA_NO_NODE)
+ return;
+
+ mask = node_to_cpumask_map[node];
+ if (!cpumask_available(mask)) {
+ pr_err("node_to_cpumask_map[%i] NULL\n", node);
+ dump_stack();
+ return;
+ }
+
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+
+ pr_debug("%s cpu %d node %d: mask now %*pbl\n",
+ enable ? "numa_add_cpu" : "numa_remove_cpu",
+ cpu, node, cpumask_pr_args(mask));
+}
+#endif /* CONFIG_NUMA_EMU */
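
The net effect of the arch_numa.c changes is that the arch-local memblk
bookkeeping and distance table move to the shared numa_memblks implementation:
the init_func an architecture passes to numa_memblks_init() only has to report
what the firmware describes. A minimal sketch of such a callback, assuming
<linux/numa_memblks.h> and <linux/sizes.h>; the node layout is made up for
illustration:

#include <linux/numa_memblks.h>
#include <linux/sizes.h>

static int __init example_fw_numa_init(void)
{
	int ret;

	/* node 0: [0, 4G), node 1: [4G, 8G) -- illustrative ranges */
	ret = numa_add_memblk(0, 0, SZ_4G);
	if (ret < 0)
		return ret;
	ret = numa_add_memblk(1, SZ_4G, 2 * SZ_4G);
	if (ret < 0)
		return ret;

	numa_set_distance(0, 1, REMOTE_DISTANCE);
	numa_set_distance(1, 0, REMOTE_DISTANCE);
	return 0;
}

The driver core side would then run numa_memblks_init(example_fw_numa_init,
false), as in the numa_init() hunk above.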
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 024b78a0cfc1..3ebe77566788 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/cacheinfo.h>
+#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
@@ -22,7 +23,7 @@
#include <linux/units.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/thermal_pressure.h>
+#include <trace/events/hw_pressure.h>
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
@@ -160,26 +161,26 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
-DEFINE_PER_CPU(unsigned long, thermal_pressure);
+DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
- * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * topology_update_hw_pressure() - Update HW pressure for CPUs
* @cpus : The related CPUs for which capacity has been reduced
* @capped_freq : The maximum allowed frequency that CPUs can run at
*
- * Update the value of thermal pressure for all @cpus in the mask. The
+ * Update the value of HW pressure for all @cpus in the mask. The
* cpumask should include all (online+offline) affected CPUs, to avoid
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
- * thermal capping. It might be also a boost frequency value, which is bigger
+ * HW capping. It might be also a boost frequency value, which is bigger
* than the internal 'capacity_freq_ref' max frequency. In such case the
* pressure value should simply be removed, since this is an indication that
- * there is no thermal throttling. The @capped_freq must be provided in kHz.
+ * there is no HW throttling. The @capped_freq must be provided in kHz.
*/
-void topology_update_thermal_pressure(const struct cpumask *cpus,
+void topology_update_hw_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
{
- unsigned long max_capacity, capacity, th_pressure;
+ unsigned long max_capacity, capacity, pressure;
u32 max_freq;
int cpu;
@@ -189,21 +190,21 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
/*
* Handle properly the boost frequencies, which should simply clean
- * the thermal pressure value.
+ * the HW pressure value.
*/
if (max_freq <= capped_freq)
capacity = max_capacity;
else
capacity = mult_frac(max_capacity, capped_freq, max_freq);
- th_pressure = max_capacity - capacity;
+ pressure = max_capacity - capacity;
- trace_thermal_pressure_update(cpu, th_pressure);
+ trace_hw_pressure_update(cpu, pressure);
for_each_cpu(cpu, cpus)
- WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+ WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
}
-EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
+EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
@@ -365,7 +366,7 @@ void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
-void topology_init_cpu_capacity_cppc(void)
+static inline void topology_init_cpu_capacity_cppc(void)
{
u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
@@ -416,6 +417,10 @@ void topology_init_cpu_capacity_cppc(void)
exit:
free_raw_capacity();
}
+void acpi_processor_init_invariance_cppc(void)
+{
+ topology_init_cpu_capacity_cppc();
+}
#endif
#ifdef CONFIG_CPU_FREQ
@@ -513,10 +518,10 @@ core_initcall(free_raw_capacity);
*/
static int __init get_cpu_for_node(struct device_node *node)
{
- struct device_node *cpu_node;
int cpu;
+ struct device_node *cpu_node __free(device_node) =
+ of_parse_phandle(node, "cpu", 0);
- cpu_node = of_parse_phandle(node, "cpu", 0);
if (!cpu_node)
return -1;
@@ -527,7 +532,6 @@ static int __init get_cpu_for_node(struct device_node *node)
pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
cpu_node, cpumask_pr_args(cpu_possible_mask));
- of_node_put(cpu_node);
return cpu;
}
@@ -538,28 +542,28 @@ static int __init parse_core(struct device_node *core, int package_id,
bool leaf = true;
int i = 0;
int cpu;
- struct device_node *t;
do {
snprintf(name, sizeof(name), "thread%d", i);
- t = of_get_child_by_name(core, name);
- if (t) {
- leaf = false;
- cpu = get_cpu_for_node(t);
- if (cpu >= 0) {
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].cluster_id = cluster_id;
- cpu_topology[cpu].core_id = core_id;
- cpu_topology[cpu].thread_id = i;
- } else if (cpu != -ENODEV) {
- pr_err("%pOF: Can't get CPU for thread\n", t);
- of_node_put(t);
- return -EINVAL;
- }
- of_node_put(t);
+ struct device_node *t __free(device_node) =
+ of_get_child_by_name(core, name);
+
+ if (!t)
+ break;
+
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else if (cpu != -ENODEV) {
+ pr_err("%pOF: Can't get CPU for thread\n", t);
+ return -EINVAL;
}
i++;
- } while (t);
+ } while (1);
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
@@ -586,7 +590,6 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
char name[20];
bool leaf = true;
bool has_cores = false;
- struct device_node *c;
int core_id = 0;
int i, ret;
@@ -598,49 +601,50 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
i = 0;
do {
snprintf(name, sizeof(name), "cluster%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- leaf = false;
- ret = parse_cluster(c, package_id, i, depth + 1);
- if (depth > 0)
- pr_warn("Topology for clusters of clusters not yet supported\n");
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(cluster, name);
+
+ if (!c)
+ break;
+
+ leaf = false;
+ ret = parse_cluster(c, package_id, i, depth + 1);
+ if (depth > 0)
+ pr_warn("Topology for clusters of clusters not yet supported\n");
+ if (ret != 0)
+ return ret;
i++;
- } while (c);
+ } while (1);
/* Now check for cores */
i = 0;
do {
snprintf(name, sizeof(name), "core%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- has_cores = true;
-
- if (depth == 0) {
- pr_err("%pOF: cpu-map children should be clusters\n",
- c);
- of_node_put(c);
- return -EINVAL;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(cluster, name);
- if (leaf) {
- ret = parse_core(c, package_id, cluster_id,
- core_id++);
- } else {
- pr_err("%pOF: Non-leaf cluster with core %s\n",
- cluster, name);
- ret = -EINVAL;
- }
+ if (!c)
+ break;
- of_node_put(c);
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%pOF: cpu-map children should be clusters\n", c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, package_id, cluster_id, core_id++);
if (ret != 0)
return ret;
+ } else {
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
+ return -EINVAL;
}
+
i++;
- } while (c);
+ } while (1);
if (leaf && !has_cores)
pr_warn("%pOF: empty cluster\n", cluster);
@@ -651,22 +655,24 @@ static int __init parse_cluster(struct device_node *cluster, int package_id,
static int __init parse_socket(struct device_node *socket)
{
char name[20];
- struct device_node *c;
bool has_socket = false;
int package_id = 0, ret;
do {
snprintf(name, sizeof(name), "socket%d", package_id);
- c = of_get_child_by_name(socket, name);
- if (c) {
- has_socket = true;
- ret = parse_cluster(c, package_id, -1, 0);
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
+ struct device_node *c __free(device_node) =
+ of_get_child_by_name(socket, name);
+
+ if (!c)
+ break;
+
+ has_socket = true;
+ ret = parse_cluster(c, package_id, -1, 0);
+ if (ret != 0)
+ return ret;
+
package_id++;
- } while (c);
+ } while (1);
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
@@ -676,11 +682,11 @@ static int __init parse_socket(struct device_node *socket)
static int __init parse_dt_topology(void)
{
- struct device_node *cn, *map;
int ret = 0;
int cpu;
+ struct device_node *cn __free(device_node) =
+ of_find_node_by_path("/cpus");
- cn = of_find_node_by_path("/cpus");
if (!cn) {
pr_err("No CPU information found in DT\n");
return 0;
@@ -690,13 +696,15 @@ static int __init parse_dt_topology(void)
* When topology is provided cpu-map is essentially a root
* cluster with restricted subnodes.
*/
- map = of_get_child_by_name(cn, "cpu-map");
+ struct device_node *map __free(device_node) =
+ of_get_child_by_name(cn, "cpu-map");
+
if (!map)
- goto out;
+ return ret;
ret = parse_socket(map);
if (ret != 0)
- goto out_map;
+ return ret;
topology_normalize_cpu_scale();
@@ -706,14 +714,9 @@ static int __init parse_dt_topology(void)
*/
for_each_possible_cpu(cpu)
if (cpu_topology[cpu].package_id < 0) {
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out_map:
- of_node_put(map);
-out:
- of_node_put(cn);
return ret;
}
#endif
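
The parse_core()/parse_cluster()/parse_socket() rewrites above all rely on the
scope-based cleanup support from <linux/cleanup.h>: a device_node pointer
declared with __free(device_node) has of_node_put() run on it automatically
when it goes out of scope, so the error paths can simply return. A minimal
sketch of the pattern, with an illustrative function name:

#include <linux/cleanup.h>
#include <linux/of.h>

static int example_count_threads(const struct device_node *core)
{
	int i = 0;

	do {
		char name[20];

		snprintf(name, sizeof(name), "thread%d", i);
		struct device_node *t __free(device_node) =
			of_get_child_by_name(core, name);

		if (!t)
			break;
		/*
		 * of_node_put(t) runs automatically at the end of every
		 * iteration and on any early return.
		 */
		i++;
	} while (1);

	return i;
}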
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 01ef796c2055..b6f941a6ab69 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -346,8 +346,7 @@ attribute_container_device_trigger_safe(struct device *dev,
* @fn: the function to execute for each classdev.
*
* This function is for executing a trigger when you need to know both
- * the container and the classdev. If you only care about the
- * container, then use attribute_container_trigger() instead.
+ * the container and the classdev.
*/
void
attribute_container_device_trigger(struct device *dev,
@@ -379,33 +378,6 @@ attribute_container_device_trigger(struct device *dev,
}
/**
- * attribute_container_trigger - trigger a function for each matching container
- *
- * @dev: The generic device to activate the trigger for
- * @fn: the function to trigger
- *
- * This routine triggers a function that only needs to know the
- * matching containers (not the classdev) associated with a device.
- * It is more lightweight than attribute_container_device_trigger, so
- * should be used in preference unless the triggering function
- * actually needs to know the classdev.
- */
-void
-attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- if (cont->match(cont, dev))
- fn(cont, dev);
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/**
* attribute_container_add_attrs - add attributes
*
* @classdev: The class device
@@ -459,24 +431,6 @@ attribute_container_add_class_device(struct device *classdev)
}
/**
- * attribute_container_add_class_device_adapter - simple adapter for triggers
- *
- * @cont: the container to register.
- * @dev: the generic device to activate the trigger for
- * @classdev: the class device to add
- *
- * This function is identical to attribute_container_add_class_device except
- * that it is designed to be called from the triggers
- */
-int
-attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- return attribute_container_add_class_device(classdev);
-}
-
-/**
* attribute_container_remove_attrs - remove any attribute files
*
* @classdev: The class device to remove the files from
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index d3a2c40c2f12..afa4df4c5a3f 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -92,7 +92,7 @@
* Auxiliary devices are created and registered by a subsystem-level core
* device that needs to break up its functionality into smaller fragments. One
* way to extend the scope of an auxiliary_device is to encapsulate it within a
- * domain- pecific structure defined by the parent device. This structure
+ * domain-specific structure defined by the parent device. This structure
* contains the auxiliary_device and any associated shared data/callbacks
* needed to establish the connection with the parent.
*
@@ -177,10 +177,10 @@ static const struct auxiliary_device_id *auxiliary_match_id(const struct auxilia
return NULL;
}
-static int auxiliary_match(struct device *dev, struct device_driver *drv)
+static int auxiliary_match(struct device *dev, const struct device_driver *drv)
{
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
return !!auxiliary_match_id(auxdrv->id_table, auxdev);
}
@@ -203,7 +203,7 @@ static const struct dev_pm_ops auxiliary_dev_pm_ops = {
static int auxiliary_bus_probe(struct device *dev)
{
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
@@ -222,7 +222,7 @@ static int auxiliary_bus_probe(struct device *dev)
static void auxiliary_bus_remove(struct device *dev)
{
- struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ const struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
if (auxdrv->remove)
@@ -232,7 +232,7 @@ static void auxiliary_bus_remove(struct device *dev)
static void auxiliary_bus_shutdown(struct device *dev)
{
- struct auxiliary_driver *auxdrv = NULL;
+ const struct auxiliary_driver *auxdrv = NULL;
struct auxiliary_device *auxdev;
if (dev->driver) {
@@ -287,6 +287,7 @@ int auxiliary_device_init(struct auxiliary_device *auxdev)
dev->bus = &auxiliary_bus_type;
device_initialize(&auxdev->dev);
+ mutex_init(&auxdev->sysfs.lock);
return 0;
}
EXPORT_SYMBOL_GPL(auxiliary_device_init);
@@ -335,35 +336,6 @@ int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
EXPORT_SYMBOL_GPL(__auxiliary_device_add);
/**
- * auxiliary_find_device - auxiliary device iterator for locating a particular device.
- * @start: Device to begin with
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This function returns a reference to a device that is 'found'
- * for later use, as determined by the @match callback.
- *
- * The reference returned should be released with put_device().
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- */
-struct auxiliary_device *auxiliary_find_device(struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data))
-{
- struct device *dev;
-
- dev = bus_find_device(&auxiliary_bus_type, start, data, match);
- if (!dev)
- return NULL;
-
- return to_auxiliary_dev(dev);
-}
-EXPORT_SYMBOL_GPL(auxiliary_find_device);
-
-/**
* __auxiliary_driver_register - register a driver for auxiliary bus devices
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
diff --git a/drivers/base/auxiliary_sysfs.c b/drivers/base/auxiliary_sysfs.c
new file mode 100644
index 000000000000..754f21730afd
--- /dev/null
+++ b/drivers/base/auxiliary_sysfs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/slab.h>
+
+#define AUXILIARY_MAX_IRQ_NAME 11
+
+struct auxiliary_irq_info {
+ struct device_attribute sysfs_attr;
+ char name[AUXILIARY_MAX_IRQ_NAME];
+};
+
+static struct attribute *auxiliary_irq_attrs[] = {
+ NULL
+};
+
+static const struct attribute_group auxiliary_irqs_group = {
+ .name = "irqs",
+ .attrs = auxiliary_irq_attrs,
+};
+
+static int auxiliary_irq_dir_prepare(struct auxiliary_device *auxdev)
+{
+ int ret = 0;
+
+ guard(mutex)(&auxdev->sysfs.lock);
+ if (auxdev->sysfs.irq_dir_exists)
+ return 0;
+
+ ret = devm_device_add_group(&auxdev->dev, &auxiliary_irqs_group);
+ if (ret)
+ return ret;
+
+ auxdev->sysfs.irq_dir_exists = true;
+ xa_init(&auxdev->sysfs.irqs);
+ return 0;
+}
+
+/**
+ * auxiliary_device_sysfs_irq_add - add a sysfs entry for the given IRQ
+ * @auxdev: auxiliary bus device to add the sysfs entry to.
+ * @irq: The associated interrupt number.
+ *
+ * This function should be called after the auxiliary device has successfully
+ * received its IRQ.
+ * The driver is responsible for adding a unique IRQ for the auxiliary device.
+ * The driver may safely invoke this function from multiple thread contexts
+ * for unique IRQs of the auxiliary devices. The driver must not invoke this
+ * API multiple times if the IRQ was already added previously.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq)
+{
+ struct auxiliary_irq_info *info __free(kfree) = NULL;
+ struct device *dev = &auxdev->dev;
+ int ret;
+
+ ret = auxiliary_irq_dir_prepare(auxdev);
+ if (ret)
+ return ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sysfs_attr_init(&info->sysfs_attr.attr);
+ snprintf(info->name, AUXILIARY_MAX_IRQ_NAME, "%d", irq);
+
+ ret = xa_insert(&auxdev->sysfs.irqs, irq, info, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ info->sysfs_attr.attr.name = info->name;
+ ret = sysfs_add_file_to_group(&dev->kobj, &info->sysfs_attr.attr,
+ auxiliary_irqs_group.name);
+ if (ret)
+ goto sysfs_add_err;
+
+ xa_store(&auxdev->sysfs.irqs, irq, no_free_ptr(info), GFP_KERNEL);
+ return 0;
+
+sysfs_add_err:
+ xa_erase(&auxdev->sysfs.irqs, irq);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_add);
+
+/**
+ * auxiliary_device_sysfs_irq_remove - remove a sysfs entry for the given IRQ
+ * @auxdev: auxiliary bus device to remove the sysfs entry from.
+ * @irq: the IRQ to remove.
+ *
+ * This function should be called to remove an IRQ sysfs entry.
+ * The driver must invoke this API when the IRQ is released by the device.
+ */
+void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq)
+{
+ struct auxiliary_irq_info *info __free(kfree) = xa_load(&auxdev->sysfs.irqs, irq);
+ struct device *dev = &auxdev->dev;
+
+ if (!info) {
+ dev_err(&auxdev->dev, "IRQ %d doesn't exist\n", irq);
+ return;
+ }
+ sysfs_remove_file_from_group(&dev->kobj, &info->sysfs_attr.attr,
+ auxiliary_irqs_group.name);
+ xa_erase(&auxdev->sysfs.irqs, irq);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_remove);
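
For context, a consumer of this new API might look like the sketch below; the
function names and the constant IRQ number are illustrative, not part of this
patch. Each added IRQ shows up as /sys/bus/auxiliary/devices/<name>/irqs/<N>:

#include <linux/auxiliary_bus.h>

static int example_aux_probe(struct auxiliary_device *auxdev,
			     const struct auxiliary_device_id *id)
{
	int irq = 42;	/* illustrative; normally obtained from the parent */
	int ret;

	/* Publish the IRQ under .../irqs/ once it is in use. */
	ret = auxiliary_device_sysfs_irq_add(auxdev, irq);
	if (ret)
		return ret;

	dev_set_drvdata(&auxdev->dev, (void *)(long)irq);
	return 0;
}

static void example_aux_remove(struct auxiliary_device *auxdev)
{
	int irq = (long)dev_get_drvdata(&auxdev->dev);

	auxiliary_device_sysfs_irq_remove(auxdev, irq);
}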
diff --git a/drivers/base/base.h b/drivers/base/base.h
index eb4c0ace9242..0042e4774b0c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -112,7 +112,7 @@ struct device_private {
struct klist_node knode_bus;
struct klist_node knode_class;
struct list_head deferred_probe;
- struct device_driver *async_driver;
+ const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
u8 dead:1;
@@ -137,6 +137,7 @@ int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
int platform_bus_init(void);
+int faux_bus_init(void);
void cpu_dev_init(void);
void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
@@ -145,7 +146,7 @@ void auxiliary_bus_init(void);
static inline void auxiliary_bus_init(void) { }
#endif
-struct kobject *virtual_device_parent(struct device *dev);
+struct kobject *virtual_device_parent(void);
int bus_add_device(struct device *dev);
void bus_probe_device(struct device *dev);
@@ -155,13 +156,13 @@ bool bus_is_registered(const struct bus_type *bus);
int bus_add_driver(struct device_driver *drv);
void bus_remove_driver(struct device_driver *drv);
-void device_release_driver_internal(struct device *dev, struct device_driver *drv,
+void device_release_driver_internal(struct device *dev, const struct device_driver *drv,
struct device *parent);
-void driver_detach(struct device_driver *drv);
+void driver_detach(const struct device_driver *drv);
void driver_deferred_probe_del(struct device *dev);
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf);
-static inline int driver_match_device(struct device_driver *drv,
+static inline int driver_match_device(const struct device_driver *drv,
struct device *dev)
{
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
@@ -175,8 +176,8 @@ static inline void dev_sync_state(struct device *dev)
dev->driver->sync_state(dev);
}
-int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups);
-void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups);
+int driver_add_groups(const struct device_driver *drv, const struct attribute_group **groups);
+void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
int devres_release_all(struct device *dev);
@@ -192,11 +193,14 @@ extern struct kset *devices_kset;
void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-void module_add_driver(struct module *mod, struct device_driver *drv);
-void module_remove_driver(struct device_driver *drv);
+int module_add_driver(struct module *mod, const struct device_driver *drv);
+void module_remove_driver(const struct device_driver *drv);
#else
-static inline void module_add_driver(struct module *mod,
- struct device_driver *drv) { }
+static inline int module_add_driver(struct module *mod,
+ struct device_driver *drv)
+{
+ return 0;
+}
static inline void module_remove_driver(struct device_driver *drv) { }
#endif
@@ -207,7 +211,7 @@ static inline int devtmpfs_init(void) { return 0; }
#endif
#ifdef CONFIG_BLOCK
-extern struct class block_class;
+extern const struct class block_class;
static inline bool is_blockdev(struct device *dev)
{
return dev->class == &block_class;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index daee55c9b2d9..6b9e65a42cd2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -152,7 +152,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for reading a bus attribute without show() */
+ ssize_t ret = -EIO;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
@@ -164,7 +165,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for writing a bus attribute without store() */
+ ssize_t ret = -EIO;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);
@@ -352,7 +354,7 @@ static struct device *next_device(struct klist_iter *i)
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -389,7 +391,7 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev);
*/
struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -400,9 +402,12 @@ struct device *bus_find_device(const struct bus_type *bus,
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
subsys_put(sp);
return dev;
@@ -674,7 +679,12 @@ int bus_add_driver(struct device_driver *drv)
if (error)
goto out_del_list;
}
- module_add_driver(drv->owner, drv);
+ error = module_add_driver(drv->owner, drv);
+ if (error) {
+ printk(KERN_ERR "%s: failed to create module links for %s\n",
+ __func__, drv->name);
+ goto out_detach;
+ }
error = driver_create_file(drv, &driver_attr_uevent);
if (error) {
@@ -699,6 +709,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
+out_detach:
+ driver_detach(drv);
out_del_list:
klist_del(&priv->knode_bus);
out_unregister:
@@ -913,6 +925,8 @@ bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&priv->subsys);
+ /* Above kset_unregister() will kfree @priv */
+ priv = NULL;
out:
kfree(priv);
return retval;
@@ -1287,7 +1301,7 @@ int subsys_virtual_register(const struct bus_type *subsys,
{
struct kobject *virtual_dir;
- virtual_dir = virtual_device_parent(NULL);
+ virtual_dir = virtual_device_parent();
if (!virtual_dir)
return -ENOMEM;
@@ -1378,8 +1392,13 @@ int __init buses_init(void)
return -ENOMEM;
system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
- if (!system_kset)
+ if (!system_kset) {
+ /* Do error handling here as devices_init() does */
+ kset_unregister(bus_kset);
+ bus_kset = NULL;
+ pr_err("%s: failed to create and add kset 'bus'\n", __func__);
return -ENOMEM;
+ }
return 0;
}
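
The prototype changes above swap open-coded function-pointer types for the
device_iter_t and device_match_t typedefs; the callback shape itself is
unchanged. A sketch of a match callback (names illustrative), noting that
bus_find_device() hands back the device with a reference held:

#include <linux/device.h>
#include <linux/string.h>

/* device_match_t: int (*)(struct device *dev, const void *data) */
static int example_match_name(struct device *dev, const void *data)
{
	return sysfs_streq(dev_name(dev), data);
}

static void example_lookup(const struct bus_type *bus)
{
	struct device *dev;

	dev = bus_find_device(bus, NULL, "serial0", example_match_name);
	if (!dev)
		return;

	/* use dev here */
	put_device(dev);	/* drop the reference bus_find_device() took */
}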
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index f1e79263fe61..cf0d455209d7 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
{
struct cacheinfo *llc;
- if (!cache_leaves(cpu))
+ if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
return false;
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -202,29 +202,24 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
static int cache_setup_of_node(unsigned int cpu)
{
- struct device_node *np, *prev;
struct cacheinfo *this_leaf;
unsigned int index = 0;
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
return -ENOENT;
}
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
- prev = np;
-
while (index < cache_leaves(cpu)) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1) {
+ struct device_node *prev __free(device_node) = np;
np = of_find_next_cache_node(np);
- of_node_put(prev);
- prev = np;
if (!np)
break;
}
@@ -233,8 +228,6 @@ static int cache_setup_of_node(unsigned int cpu)
index++;
}
- of_node_put(np);
-
if (index != cache_leaves(cpu)) /* not all OF nodes populated */
return -ENOENT;
@@ -243,17 +236,14 @@ static int cache_setup_of_node(unsigned int cpu)
static bool of_check_cache_nodes(struct device_node *np)
{
- struct device_node *next;
-
if (of_property_present(np, "cache-size") ||
of_property_present(np, "i-cache-size") ||
of_property_present(np, "d-cache-size") ||
of_property_present(np, "cache-unified"))
return true;
- next = of_find_next_cache_node(np);
+ struct device_node *next __free(device_node) = of_find_next_cache_node(np);
if (next) {
- of_node_put(next);
return true;
}
@@ -264,11 +254,11 @@ static int of_count_cache_leaves(struct device_node *np)
{
unsigned int leaves = 0;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
++leaves;
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
++leaves;
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
++leaves;
if (!leaves) {
@@ -287,12 +277,10 @@ static int of_count_cache_leaves(struct device_node *np)
int init_of_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct device_node *np = of_cpu_device_node_get(cpu);
- struct device_node *prev = NULL;
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
unsigned int levels = 0, leaves, level;
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
@@ -300,30 +288,27 @@ int init_of_cache_level(unsigned int cpu)
if (leaves > 0)
levels = 1;
- prev = np;
- while ((np = of_find_next_cache_node(np))) {
- of_node_put(prev);
- prev = np;
+ while (1) {
+ struct device_node *prev __free(device_node) = np;
+ np = of_find_next_cache_node(np);
+ if (!np)
+ break;
+
if (!of_device_is_compatible(np, "cache"))
- goto err_out;
+ return -EINVAL;
if (of_property_read_u32(np, "cache-level", &level))
- goto err_out;
+ return -EINVAL;
if (level <= levels)
- goto err_out;
+ return -EINVAL;
leaves += of_count_cache_leaves(np);
levels = level;
}
- of_node_put(np);
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
return 0;
-
-err_out:
- of_node_put(np);
- return -EINVAL;
}
#else
@@ -382,9 +367,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
- struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
-
- if (i == cpu || !sib_cpu_ci->info_list)
+ if (i == cpu || !per_cpu_cacheinfo(i))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
@@ -424,10 +407,7 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
- struct cpu_cacheinfo *sib_cpu_ci =
- get_cpu_cacheinfo(sibling);
-
- if (sibling == cpu || !sib_cpu_ci->info_list)
+ if (sibling == cpu || !per_cpu_cacheinfo(sibling))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
@@ -478,11 +458,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static inline
-int allocate_cache_info(int cpu)
+static inline int allocate_cache_info(int cpu)
{
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
if (!per_cpu_cacheinfo(cpu)) {
cache_leaves(cpu) = 0;
return -ENOMEM;
@@ -554,7 +532,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
*/
ci_cacheinfo(cpu)->early_ci_levels = false;
- if (cache_leaves(cpu) <= early_leaves)
+ /*
+ * Some architectures (e.g., x86) do not use early initialization.
+ * Allocate memory now in such case.
+ */
+ if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
return 0;
kfree(per_cpu_cacheinfo(cpu));
@@ -898,6 +880,37 @@ err:
return rc;
}
+static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
+ cpumask_t **map)
+{
+ struct cacheinfo *llc, *sib_llc;
+ unsigned int sibling;
+
+ if (!last_level_cache_is_valid(cpu))
+ return 0;
+
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
+ return 0;
+
+ if (online) {
+ *map = &llc->shared_cpu_map;
+ return cpumask_weight(*map);
+ }
+
+ /* shared_cpu_map of offlined CPU will be cleared, so use sibling map */
+ for_each_cpu(sibling, &llc->shared_cpu_map) {
+ if (sibling == cpu || !last_level_cache_is_valid(sibling))
+ continue;
+ sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1);
+ *map = &sib_llc->shared_cpu_map;
+ return cpumask_weight(*map);
+ }
+
+ return 0;
+}
+
/*
* Calculate the size of the per-CPU data cache slice. This can be
* used to estimate the size of the data cache slice that can be used
@@ -929,28 +942,31 @@ static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
ci->per_cpu_data_slice_size = llc->size / nr_shared;
}
-static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu)
+static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
+ cpumask_t *cpu_map)
{
unsigned int icpu;
- for_each_online_cpu(icpu) {
+ for_each_cpu(icpu, cpu_map) {
if (!cpu_online && icpu == cpu)
continue;
update_per_cpu_data_slice_size_cpu(icpu);
+ setup_pcp_cacheinfo(icpu);
}
}
static int cacheinfo_cpu_online(unsigned int cpu)
{
int rc = detect_cache_attributes(cpu);
+ cpumask_t *cpu_map;
if (rc)
return rc;
rc = cache_add_dev(cpu);
if (rc)
goto err;
- update_per_cpu_data_slice_size(true, cpu);
- setup_pcp_cacheinfo();
+ if (cpu_map_shared_cache(true, cpu, &cpu_map))
+ update_per_cpu_data_slice_size(true, cpu, cpu_map);
return 0;
err:
free_cache_attributes(cpu);
@@ -959,12 +975,16 @@ err:
static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
+ cpumask_t *cpu_map;
+ unsigned int nr_shared;
+
+ nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
cpu_cache_sysfs_exit(cpu);
free_cache_attributes(cpu);
- update_per_cpu_data_slice_size(false, cpu);
- setup_pcp_cacheinfo();
+ if (nr_shared > 1)
+ update_per_cpu_data_slice_size(false, cpu, cpu_map);
return 0;
}
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7b38fdf8e1d7..2526c57d924e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -183,6 +183,17 @@ int class_register(const struct class *cls)
pr_debug("device class '%s': registering\n", cls->name);
+ if (cls->ns_type && !cls->namespace) {
+ pr_err("%s: class '%s' does not have namespace\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+ if (!cls->ns_type && cls->namespace) {
+ pr_err("%s: class '%s' does not have ns_type\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -312,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
- if (!sp)
+ memset(iter, 0, sizeof(*iter));
+ if (!sp) {
+ pr_crit("%s: class %p was not registered yet\n",
+ __func__, class);
return;
+ }
if (start)
start_knode = &start->p->knode_class;
@@ -340,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
struct klist_node *knode;
struct device *dev;
+ if (!iter->sp)
+ return NULL;
+
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
@@ -384,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit);
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -394,7 +412,7 @@ int class_for_each_device(const struct class *class, const struct device *start,
if (!class)
return -EINVAL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return -EINVAL;
}
@@ -433,8 +451,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* code. There's no locking restriction.
*/
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data,
- int (*match)(struct device *, const void *))
+ const void *data, device_match_t match)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -443,7 +460,7 @@ struct device *class_find_device(const struct class *class, const struct device
if (!class)
return NULL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return NULL;
}
@@ -584,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link should be created
*/
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+int class_compat_create_link(struct class_compat *cls, struct device *dev)
{
- int error;
-
- error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
- if (error)
- return error;
-
- /*
- * Optionally add a "device" link (typically to the parent), as a
- * class device would have one and we want to provide as much
- * backwards compatibility as possible.
- */
- if (device_link) {
- error = sysfs_create_link(&dev->kobj, &device_link->kobj,
- "device");
- if (error)
- sysfs_remove_link(cls->kobj, dev_name(dev));
- }
-
- return error;
+ return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
@@ -616,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link was previously
- * created
*/
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+void class_compat_remove_link(struct class_compat *cls, struct device *dev)
{
- if (device_link)
- sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
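
With the @device_link parameter gone, the compatibility-class helpers reduce
to a single symlink per device. A sketch of a caller under the new signatures
(the class name is illustrative):

#include <linux/device/class.h>

static struct class_compat *example_compat;

static int example_register(struct device *dev)
{
	example_compat = class_compat_register("example-adapter");
	if (!example_compat)
		return -ENOMEM;

	/* creates only /sys/class/example-adapter/<dev_name> */
	return class_compat_create_link(example_compat, dev);
}

static void example_unregister(struct device *dev)
{
	class_compat_remove_link(example_compat, dev);
	class_compat_unregister(example_compat);
}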
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 7dbf14a1d915..741497324d78 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -751,7 +751,7 @@ static int __component_add(struct device *dev, const struct component_ops *ops,
* component_bind_all(). See also &struct component_ops.
*
* @subcomponent must be nonzero and is used to differentiate between multiple
- * components registerd on the same device @dev. These components are match
+ * components registered on the same device @dev. These components are match
* using component_match_add_typed().
*
* The component needs to be unregistered at driver unload/disconnect by
@@ -781,7 +781,7 @@ EXPORT_SYMBOL_GPL(component_add_typed);
* The component needs to be unregistered at driver unload/disconnect by
* calling component_del().
*
- * See also component_add_typed() for a variant that allows multipled different
+ * See also component_add_typed() for a variant that allows multiple different
* components on the same device.
*/
int component_add(struct device *dev, const struct component_ops *ops)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9828da9b933c..2fde698430df 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -9,28 +9,29 @@
*/
#include <linux/acpi.h>
+#include <linux/blkdev.h>
+#include <linux/cleanup.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kdev_t.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
#include <linux/pm_runtime.h>
-#include <linux/netdevice.h>
-#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
-#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "physical_location.h"
@@ -44,6 +45,7 @@ static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
+static struct workqueue_struct *device_link_wq;
/**
* __fwnode_link_add - Create a link between two fwnode_handles.
@@ -92,14 +94,12 @@ static int __fwnode_link_add(struct fwnode_handle *con,
return 0;
}
-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
+ u8 flags)
{
- int ret;
+ guard(mutex)(&fwnode_link_lock);
- mutex_lock(&fwnode_link_lock);
- ret = __fwnode_link_add(con, sup, 0);
- mutex_unlock(&fwnode_link_lock);
- return ret;
+ return __fwnode_link_add(con, sup, flags);
}
/**
@@ -140,10 +140,10 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -156,10 +156,10 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -532,13 +532,27 @@ static void devlink_dev_release(struct device *dev)
/*
* It may take a while to complete this work because of the SRCU
* synchronization in device_link_release_fn() and if the consumer or
- * supplier devices get deleted when it runs, so put it into the "long"
- * workqueue.
+ * supplier devices get deleted when it runs, so put it into the
+ * dedicated workqueue.
*/
- queue_work(system_long_wq, &link->rm_work);
+ queue_work(device_link_wq, &link->rm_work);
}
-static struct class devlink_class = {
+/**
+ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
+ */
+void device_link_wait_removal(void)
+{
+ /*
+ * devlink removal jobs are queued in the dedicated work queue.
+ * To be sure that all removal jobs are terminated, ensure that any
+ * scheduled work has run to completion.
+ */
+ flush_workqueue(device_link_wq);
+}
+EXPORT_SYMBOL_GPL(device_link_wait_removal);
+
+static const struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
.dev_release = devlink_dev_release,
@@ -546,20 +560,11 @@ static struct class devlink_class = {
static int devlink_add_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
int ret;
- size_t len;
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
-
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
if (ret)
@@ -569,58 +574,64 @@ static int devlink_add_symlinks(struct device *dev)
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con) {
+ ret = -ENOMEM;
+ goto err_con_dev;
+ }
+
+ ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup) {
+ ret = -ENOMEM;
+ goto err_sup_dev;
+ }
+
+ ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup);
if (ret)
goto err_sup_dev;
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
+ sysfs_remove_link(&sup->kobj, buf_con);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
- kfree(buf);
return ret;
}
static void devlink_remove_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
struct device_link *link = to_devlink(dev);
- size_t len;
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf) {
- WARN(1, "Unable to properly free device link symlinks!\n");
- return;
- }
-
if (device_is_registered(con)) {
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- sysfs_remove_link(&con->kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup)
+ goto out;
+ sysfs_remove_link(&con->kobj, buf_sup);
}
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
- kfree(buf);
+
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con)
+ goto out;
+ sysfs_remove_link(&sup->kobj, buf_con);
+
+ return;
+
+out:
+ WARN(1, "Unable to properly free device link symlinks!\n");
}
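Both functions above lean on the scope-based cleanup helpers from <linux/cleanup.h>: a pointer declared with __free(kfree) is handed to kfree() automatically when it goes out of scope, which is what lets the error paths drop the explicit kfree() calls. A minimal sketch of the pattern in isolation (the link-name format is just an example):

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_symlink_name(struct device *dev)
{
	char *buf __free(kfree) = kasprintf(GFP_KERNEL, "consumer:%s:%s",
					    dev_bus_name(dev), dev_name(dev));

	if (!buf)
		return -ENOMEM;

	/* use buf; kfree(buf) runs automatically on every return path */
	return 0;
}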
static struct class_interface devlink_class_intf = {
@@ -661,6 +672,9 @@ postcore_initcall(devlink_class_init);
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
+ * Return: On success, a device_link struct will be returned.
+ * On error or invalid flag settings, NULL will be returned.
+ *
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
@@ -1011,7 +1025,8 @@ static struct fwnode_handle *fwnode_links_check_suppliers(
return NULL;
list_for_each_entry(link, &fwnode->suppliers, c_hook)
- if (!(link->flags & FWLINK_FLAG_CYCLE))
+ if (!(link->flags &
+ (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE)))
return link->supplier;
return NULL;
@@ -1043,20 +1058,16 @@ int device_links_check_suppliers(struct device *dev)
* Device waiting for supplier to become available is not allowed to
* probe.
*/
- mutex_lock(&fwnode_link_lock);
- sup_fw = fwnode_links_check_suppliers(dev->fwnode);
- if (sup_fw) {
- if (!dev_is_best_effort(dev)) {
- fwnode_ret = -EPROBE_DEFER;
- dev_err_probe(dev, -EPROBE_DEFER,
- "wait for supplier %pfwf\n", sup_fw);
- } else {
- fwnode_ret = -EAGAIN;
+ scoped_guard(mutex, &fwnode_link_lock) {
+ sup_fw = fwnode_links_check_suppliers(dev->fwnode);
+ if (sup_fw) {
+ if (dev_is_best_effort(dev))
+ fwnode_ret = -EAGAIN;
+ else
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for supplier %pfwf\n", sup_fw);
}
}
- mutex_unlock(&fwnode_link_lock);
- if (fwnode_ret == -EPROBE_DEFER)
- return fwnode_ret;
device_links_write_lock();
@@ -1075,10 +1086,8 @@ int device_links_check_suppliers(struct device *dev)
}
device_links_missing_supplier(dev);
- dev_err_probe(dev, -EPROBE_DEFER,
- "supplier %s not ready\n",
- dev_name(link->supplier));
- ret = -EPROBE_DEFER;
+ ret = dev_err_probe(dev, -EPROBE_DEFER,
+ "supplier %s not ready\n", dev_name(link->supplier));
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
@@ -1231,9 +1240,8 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
bool val;
device_lock(dev);
- mutex_lock(&fwnode_link_lock);
- val = !!fwnode_links_check_suppliers(dev->fwnode);
- mutex_unlock(&fwnode_link_lock);
+ scoped_guard(mutex, &fwnode_link_lock)
+ val = !!fwnode_links_check_suppliers(dev->fwnode);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
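The conversions above use the guard()/scoped_guard() helpers from <linux/cleanup.h>, which tie the unlock to scope exit so early returns cannot leak the mutex. A minimal sketch, with example_lock and the ready flag as illustrative stand-ins:

static DEFINE_MUTEX(example_lock);
static bool ready;

static int example_check(void)
{
	scoped_guard(mutex, &example_lock) {
		/* example_lock is held inside this block only */
		if (!ready)
			return -EAGAIN;	/* unlocks automatically */
	}
	return 0;
}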
@@ -1306,13 +1314,15 @@ void device_links_driver_bound(struct device *dev)
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
+
fwnode_links_purge_suppliers(dev->fwnode);
- mutex_lock(&fwnode_link_lock);
+
+ guard(mutex)(&fwnode_link_lock);
+
fwnode_for_each_available_child_node(dev->fwnode, child)
__fw_devlink_pickup_dangling_consumers(child,
dev->fwnode);
__fw_devlink_link_to_consumers(dev);
- mutex_unlock(&fwnode_link_lock);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
@@ -1871,6 +1881,7 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
+#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
@@ -1902,8 +1913,65 @@ static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
}
/**
+ * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
+ * @ancestor: Firmware node which is tested for being an ancestor
+ * @child: Firmware node which is tested for being the child
+ *
+ * A node is considered an ancestor of itself too.
+ *
+ * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
+ */
+static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor,
+ const struct fwnode_handle *child)
+{
+ struct fwnode_handle *parent;
+
+ if (IS_ERR_OR_NULL(ancestor))
+ return false;
+
+ if (child == ancestor)
+ return true;
+
+ fwnode_for_each_parent_node(child, parent) {
+ if (parent == ancestor) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
+ * @fwnode: firmware node
+ *
+ * Given a firmware node (@fwnode), this function finds its closest ancestor
+ * firmware node that has a corresponding struct device and returns that struct
+ * device.
+ *
+ * The caller is responsible for calling put_device() on the returned device
+ * pointer.
+ *
+ * Return: a pointer to the device of the @fwnode's closest ancestor.
+ */
+static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ struct device *dev;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ dev = get_dev_from_fwnode(parent);
+ if (dev) {
+ fwnode_handle_put(parent);
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
- * @con: Potential consumer device.
+ * @con_handle: Potential consumer device fwnode.
* @sup_handle: Potential supplier's fwnode.
*
* Needs to be called with fwnode_lock and device link lock held.
@@ -1921,10 +1989,10 @@ static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
*
* Return true if one or more cycles were found. Otherwise, return false.
*/
-static bool __fw_devlink_relax_cycles(struct device *con,
+static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
struct fwnode_handle *sup_handle)
{
- struct device *sup_dev = NULL, *par_dev = NULL;
+ struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL;
struct fwnode_link *link;
struct device_link *dev_link;
bool ret = false;
@@ -1941,28 +2009,31 @@ static bool __fw_devlink_relax_cycles(struct device *con,
sup_handle->flags |= FWNODE_FLAG_VISITED;
- sup_dev = get_dev_from_fwnode(sup_handle);
-
/* Termination condition. */
- if (sup_dev == con) {
+ if (sup_handle == con_handle) {
pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
+ sup_dev = get_dev_from_fwnode(sup_handle);
+ con_dev = get_dev_from_fwnode(con_handle);
/*
* If sup_dev is bound to a driver and @con hasn't started binding to a
* driver, sup_dev can't be a consumer of @con. So, no need to check
* further.
*/
if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
- con->links.status == DL_DEV_NO_DRIVER) {
+ con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) {
ret = false;
goto out;
}
list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
- if (__fw_devlink_relax_cycles(con, link->supplier)) {
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ continue;
+
+ if (__fw_devlink_relax_cycles(con_handle, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
}
@@ -1977,7 +2048,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) {
pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
par_dev->fwnode);
ret = true;
@@ -1995,7 +2066,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
!(dev_link->flags & DL_FLAG_CYCLE))
continue;
- if (__fw_devlink_relax_cycles(con,
+ if (__fw_devlink_relax_cycles(con_handle,
dev_link->supplier->fwnode)) {
pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
dev_link->supplier->fwnode);
@@ -2008,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
+ put_device(con_dev);
put_device(par_dev);
return ret;
}
@@ -2040,10 +2112,8 @@ static int fw_devlink_create_devlink(struct device *con,
int ret = 0;
u32 flags;
- if (con->fwnode == link->consumer)
- flags = fw_devlink_get_flags(link->flags);
- else
- flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ return 0;
/*
* In some cases, a device P might also be a supplier to its child node
@@ -2065,25 +2135,23 @@ static int fw_devlink_create_devlink(struct device *con,
return -EINVAL;
/*
- * SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So, one might expect that cycle detection isn't necessary for them.
- * However, if the device link was marked as SYNC_STATE_ONLY because
- * it's part of a cycle, then we still need to do cycle detection. This
- * is because the consumer and supplier might be part of multiple cycles
- * and we need to detect all those cycles.
+ * Don't try to optimize by not calling the cycle detection logic under
+ * certain conditions. There's always some corner case that won't get
+ * detected.
*/
- if (!device_link_flag_is_sync_state_only(flags) ||
- flags & DL_FLAG_CYCLE) {
- device_links_write_lock();
- if (__fw_devlink_relax_cycles(con, sup_handle)) {
- __fwnode_link_cycle(link);
- flags = fw_devlink_get_flags(link->flags);
- pr_debug("----- cycle: end -----\n");
- dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
- sup_handle);
- }
- device_links_write_unlock();
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) {
+ __fwnode_link_cycle(link);
+ pr_debug("----- cycle: end -----\n");
+ pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n",
+ link->consumer, sup_handle);
}
+ device_links_write_unlock();
+
+ if (con->fwnode == link->consumer)
+ flags = fw_devlink_get_flags(link->flags);
+ else
+ flags = FW_DEVLINK_FLAGS_PERMISSIVE;
if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
sup_dev = fwnode_get_next_parent_dev(sup_handle);
@@ -2106,8 +2174,8 @@ static int fw_devlink_create_devlink(struct device *con,
}
if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
- dev_err(con, "Failed to create device link (0x%x) with %s\n",
- flags, dev_name(sup_dev));
+ dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n",
+ flags, dev_name(sup_dev), link->consumer);
ret = -EINVAL;
}
@@ -2257,16 +2325,14 @@ static void fw_devlink_link_device(struct device *dev)
fw_devlink_parse_fwtree(fwnode);
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
__fw_devlink_link_to_consumers(dev);
__fw_devlink_link_to_suppliers(dev, fwnode);
- mutex_unlock(&fwnode_link_lock);
}
/* Device links support end. */
-int (*platform_notify)(struct device *dev) = NULL;
-int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
/* /sys/dev/char */
@@ -2314,16 +2380,10 @@ static void device_platform_notify(struct device *dev)
acpi_device_notify(dev);
software_node_notify(dev);
-
- if (platform_notify)
- platform_notify(dev);
}
static void device_platform_notify_remove(struct device *dev)
{
- if (platform_notify_remove)
- platform_notify_remove(dev);
-
software_node_notify_remove(dev);
acpi_device_notify_remove(dev);
@@ -2465,6 +2525,15 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(device_show_bool);
+ssize_t device_show_string(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)ea->var);
+}
+EXPORT_SYMBOL_GPL(device_show_string);
+
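device_show_string() is a show() helper for string-valued extended attributes, in the spirit of the existing device_show_bool()/device_show_int() helpers. A sketch of wiring it up through struct dev_ext_attribute (the attribute name and value are made up):

static struct dev_ext_attribute dev_attr_fw_version = {
	.attr = __ATTR(fw_version, 0444, device_show_string, NULL),
	.var = (void *)"1.2.3",	/* must stay valid for the attribute's lifetime */
};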
/**
* device_release - free device structure.
* @kobj: device's kobject.
@@ -2508,7 +2577,7 @@ static const void *device_namespace(const struct kobject *kobj)
const struct device *dev = kobj_to_dev(kobj);
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -2657,8 +2726,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
+ /* Synchronize with really_probe() */
+ device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ device_unlock(dev);
if (retval)
goto out;
@@ -2763,15 +2835,6 @@ static void devm_attr_group_remove(struct device *dev, void *res)
sysfs_remove_group(&dev->kobj, group);
}
-static void devm_attr_groups_remove(struct device *dev, void *res)
-{
- union device_attr_group_devres *devres = res;
- const struct attribute_group **groups = devres->groups;
-
- dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
- sysfs_remove_groups(&dev->kobj, groups);
-}
-
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
@@ -2804,42 +2867,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
-/**
- * devm_device_add_groups - create a bunch of managed attribute groups
- * @dev: The device to create the group for
- * @groups: The attribute groups to create, NULL terminated
- *
- * This function creates a bunch of managed attribute groups. If an error
- * occurs when creating a group, all previously created groups will be
- * removed, unwinding everything back to the original state when this
- * function was called. It will explicitly warn and error if any of the
- * attribute files being created already exist.
- *
- * Returns 0 on success or error code from sysfs_create_group on failure.
- */
-int devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- union device_attr_group_devres *devres;
- int error;
-
- devres = devres_alloc(devm_attr_groups_remove,
- sizeof(*devres), GFP_KERNEL);
- if (!devres)
- return -ENOMEM;
-
- error = sysfs_create_groups(&dev->kobj, groups);
- if (error) {
- devres_free(devres);
- return error;
- }
-
- devres->groups = groups;
- devres_add(dev, devres);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_device_add_groups);
-
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
@@ -3127,7 +3154,7 @@ void device_initialize(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_initialize);
-struct kobject *virtual_device_parent(struct device *dev)
+struct kobject *virtual_device_parent(void)
{
static struct kobject *virtual_dir = NULL;
@@ -3205,7 +3232,7 @@ static struct kobject *get_device_parent(struct device *dev,
* in a "glue" directory to prevent namespace collisions.
*/
if (parent == NULL)
- parent_kobj = virtual_device_parent(dev);
+ parent_kobj = virtual_device_parent();
else if (parent->class && !dev->class->ns_type) {
subsys_put(sp);
return &parent->kobj;
@@ -3954,13 +3981,13 @@ const char *device_get_devnode(const struct device *dev,
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -3984,13 +4011,13 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4002,87 +4029,77 @@ int device_for_each_child_reverse(struct device *parent, void *data,
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
- * device_find_child - device iterator for locating a particular device.
- * @parent: parent struct device
- * @match: Callback function to check device
- * @data: Data to pass to match function
- *
- * This is similar to the device_for_each_child() function above, but it
- * returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
+ * device_for_each_child_reverse_from - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @from: optional starting point in child list
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
*
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero and a reference to the
- * current device can be obtained, this function will return to the caller
- * and not iterate over any more devices.
+ * Iterate over @parent's child devices, starting at @from, and call @fn
+ * for each, passing it @data. This helper is identical to
+ * device_for_each_child_reverse() when @from is NULL.
*
- * NOTE: you will need to drop the reference with put_device() after use.
+ * @fn is checked each iteration. If it returns anything other than 0,
+ * iteration stops and that value is returned to the caller of
+ * device_for_each_child_reverse_from().
*/
-struct device *device_find_child(struct device *parent, void *data,
- int (*match)(struct device *dev, void *data))
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, void *data,
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
+ int error = 0;
- if (!parent)
- return NULL;
+ if (!parent || !parent->p)
+ return 0;
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (match(child, data) && get_device(child))
- break;
+ klist_iter_init_node(&parent->p->klist_children, &i,
+ (from ? &from->p->knode_parent : NULL));
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
klist_iter_exit(&i);
- return child;
+ return error;
}
-EXPORT_SYMBOL_GPL(device_find_child);
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
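A sketch of the intended use: unwinding previously-created children in reverse order, starting from a known point. The callback and function names are illustrative:

static int example_unbind(struct device *dev, void *data)
{
	device_release_driver(dev);
	return 0;
}

static void example_unwind(struct device *parent, struct device *last_child)
{
	/* Walk children newest-first, starting at last_child. */
	device_for_each_child_reverse_from(parent, last_child, NULL,
					   example_unbind);
}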
/**
- * device_find_child_by_name - device iterator for locating a child device.
+ * device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @name: name of the child device
+ * @match: Callback function to check device
+ * @data: Data to pass to match function
+ *
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
*
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a device that has the name @name.
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
-struct device *device_find_child_by_name(struct device *parent,
- const char *name)
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match)
{
struct klist_iter i;
struct device *child;
- if (!parent)
+ if (!parent || !parent->p)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (sysfs_streq(dev_name(child), name) && get_device(child))
+ while ((child = next_device(&i))) {
+ if (match(child, data)) {
+ get_device(child);
break;
+ }
+ }
klist_iter_exit(&i);
return child;
}
-EXPORT_SYMBOL_GPL(device_find_child_by_name);
-
-static int match_any(struct device *dev, void *unused)
-{
- return 1;
-}
-
-/**
- * device_find_any_child - device iterator for locating a child device, if any.
- * @parent: parent struct device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a child device, if any.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_any_child(struct device *parent)
-{
- return device_find_child(parent, NULL, match_any);
-}
-EXPORT_SYMBOL_GPL(device_find_any_child);
+EXPORT_SYMBOL_GPL(device_find_child);
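With the generic match callback, the removed device_find_child_by_name() and device_find_any_child() collapse into one-liners over device_find_child(); presumably the header now provides such wrappers. For example, a by-name lookup (the child name is illustrative):

static struct device *example_lookup(struct device *parent)
{
	/* What device_find_child_by_name(parent, "port0") boils down to: */
	struct device *child = device_find_child(parent, "port0",
						 device_match_name);

	/* Caller must put_device(child) when done with it. */
	return child;
}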
int __init devices_init(void)
{
@@ -4098,9 +4115,14 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
+ device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ if (!device_link_wq)
+ goto wq_err;
return 0;
+ wq_err:
+ kobject_put(sysfs_dev_char_kobj);
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:
@@ -4467,9 +4489,11 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
+ struct subsys_private *sp = NULL;
struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
+ bool is_link_renamed = false;
dev = get_device(dev);
if (!dev)
@@ -4484,7 +4508,7 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- struct subsys_private *sp = class_to_subsys(dev->class);
+ sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
@@ -4493,16 +4517,19 @@ int device_rename(struct device *dev, const char *new_name)
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj));
- subsys_put(sp);
if (error)
goto out;
+
+ is_link_renamed = true;
}
error = kobject_rename(kobj, new_name);
- if (error)
- goto out;
-
out:
+ if (error && is_link_renamed)
+ sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
+ old_device_name, kobject_namespace(kobj));
+ subsys_put(sp);
+
put_device(dev);
kfree(old_device_name);
@@ -4824,7 +4851,7 @@ set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
else
return;
- strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
+ strscpy(dev_info->subsystem, subsys);
/*
* Add device identifier DEVICE=:
@@ -4934,6 +4961,49 @@ define_dev_printk_level(_dev_info, KERN_INFO);
#endif
+static void __dev_probe_failed(const struct device *dev, int err, bool fatal,
+ const char *fmt, va_list vargsp)
+{
+ struct va_format vaf;
+ va_list vargs;
+
+ /*
+ * On x86_64 and possibly on other architectures, va_list is actually a
+ * size-1 array containing a structure. As a result, function parameter
+ * vargsp decays from T[1] to T*, and &vargsp has type T** rather than
+ * T(*)[1], which is expected by its assignment to vaf.va below.
+ *
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then using a pointer to that local copy
+ * instead, which is the approach employed here.
+ */
+ va_copy(vargs, vargsp);
+
+ vaf.fmt = fmt;
+ vaf.va = &vargs;
+
+ switch (err) {
+ case -EPROBE_DEFER:
+ device_set_deferred_probe_reason(dev, &vaf);
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+
+ case -ENOMEM:
+ /* Don't print anything on -ENOMEM, there's already enough output */
+ break;
+
+ default:
+ /* Log fatal final failures as errors, otherwise produce warnings */
+ if (fatal)
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ else
+ dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+ }
+
+ va_end(vargs);
+}
+
/**
* dev_err_probe - probe error check and log helper
* @dev: the pointer to the struct device
@@ -4946,7 +5016,7 @@ define_dev_printk_level(_dev_info, KERN_INFO);
* -EPROBE_DEFER and propagate error upwards.
* In case of -EPROBE_DEFER it also sets the defer probe reason, which can be
* checked later by reading the devices_deferred debugfs attribute.
- * It replaces code sequence::
+ * It replaces the following code sequence::
*
* if (err != -EPROBE_DEFER)
* dev_err(dev, ...);
@@ -4958,37 +5028,78 @@ define_dev_printk_level(_dev_info, KERN_INFO);
*
* return dev_err_probe(dev, err, ...);
*
- * Using this helper in your probe function is totally fine even if @err is
- * known to never be -EPROBE_DEFER.
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_err() is the standardized format
- * of the error code, it being emitted symbolically (i.e. you get "EAGAIN"
- * instead of "-35") and the fact that the error code is returned which allows
- * more compact error paths.
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
*
* Returns @err.
*/
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
- struct va_format vaf;
- va_list args;
+ va_list vargs;
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
+ va_start(vargs, fmt);
- if (err != -EPROBE_DEFER) {
- dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- } else {
- device_set_deferred_probe_reason(dev, &vaf);
- dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- }
+ /* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, true, fmt, vargs);
- va_end(args);
+ va_end(vargs);
return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
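A typical probe-path use, as described in the kernel-doc above; the clock lookup is just an example resource:

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	/* Defers quietly on -EPROBE_DEFER, logs "error EIO: ..." otherwise. */
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");
	return 0;
}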
+/**
+ * dev_warn_probe - probe error check and log helper
+ * @dev: the pointer to the struct device
+ * @err: error value to test
+ * @fmt: printf-style format string
+ * @...: arguments as specified in the format string
+ *
+ * This helper implements a common pattern present in probe functions for
+ * error checking: print a debug or warning message depending on whether the
+ * error value is -EPROBE_DEFER, and propagate the error upwards.
+ * In case of -EPROBE_DEFER it also sets the defer probe reason, which can be
+ * checked later by reading the devices_deferred debugfs attribute.
+ * It replaces the following code sequence::
+ *
+ * if (err != -EPROBE_DEFER)
+ * dev_warn(dev, ...);
+ * else
+ * dev_dbg(dev, ...);
+ * return err;
+ *
+ * with::
+ *
+ * return dev_warn_probe(dev, err, ...);
+ *
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
+ * The benefit compared to a normal dev_warn() is the standardized format
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
+ *
+ * Returns @err.
+ */
+int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...)
+{
+ va_list vargs;
+
+ va_start(vargs, fmt);
+
+ /* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, false, fmt, vargs);
+
+ va_end(vargs);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_warn_probe);
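dev_warn_probe() is the non-fatal sibling: same bookkeeping, but final failures are logged as warnings, which suits optional resources. A sketch, with the optional IRQ as an illustrative case:

static int example_probe_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_optional(pdev, 0);

	/* Optional IRQ: warn (not err) and fall back to polling. */
	if (irq < 0)
		dev_warn_probe(&pdev->dev, irq, "no IRQ, using polling\n");

	return 0;
}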
+
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
return fwnode && !IS_ERR(fwnode->secondary);
@@ -5089,15 +5200,21 @@ int device_match_name(struct device *dev, const void *name)
}
EXPORT_SYMBOL_GPL(device_match_name);
+int device_match_type(struct device *dev, const void *type)
+{
+ return dev->type == type;
+}
+EXPORT_SYMBOL_GPL(device_match_type);
+
int device_match_of_node(struct device *dev, const void *np)
{
- return dev->of_node == np;
+ return np && dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode;
+ return fwnode && dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
@@ -5109,13 +5226,13 @@ EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
- return ACPI_COMPANION(dev) == adev;
+ return adev && ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
- return ACPI_HANDLE(dev) == handle;
+ return handle && ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
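The added NULL checks make the match helpers report no-match for a NULL key instead of matching every device that lacks the corresponding field. A sketch of a lookup that benefits, using platform_bus_type as an illustrative bus:

static struct device *example_find_by_fwnode(struct fwnode_handle *fwnode)
{
	/*
	 * With the NULL check, a NULL fwnode now finds nothing instead of
	 * matching the first device that has no fwnode at all.
	 */
	return bus_find_device(&platform_bus_type, NULL, fwnode,
			       device_match_fwnode);
}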
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 47de0f140ba6..a7e511849875 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -26,7 +26,7 @@
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
-static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+static int cpu_subsys_match(struct device *dev, const struct device_driver *drv)
{
/* ACPI style match is the only one that may succeed. */
if (acpi_driver_match_device(dev, drv))
@@ -95,6 +95,7 @@ void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->dev.id;
+ set_cpu_enabled(logical_cpu, false);
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
device_unregister(&cpu->dev);
@@ -144,7 +145,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
static ssize_t crash_notes_show(struct device *dev,
@@ -189,14 +190,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
#endif
static const struct attribute_group *common_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
};
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
@@ -273,6 +274,13 @@ static ssize_t print_cpus_offline(struct device *dev,
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+static ssize_t print_cpus_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(cpu_enabled_mask));
+}
+static DEVICE_ATTR(enabled, 0444, print_cpus_enabled, NULL);
+
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -306,9 +314,9 @@ static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sysfs_emit(buf, "%d\n", crash_hotplug_cpu_support());
+ return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
-static DEVICE_ATTR_ADMIN_RO(crash_hotplug);
+static DEVICE_ATTR_RO(crash_hotplug);
#endif
static void cpu_device_release(struct device *dev)
@@ -366,7 +374,7 @@ static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
}
#endif
-struct bus_type cpu_subsys = {
+const struct bus_type cpu_subsys = {
.name = "cpu",
.dev_name = "cpu",
.match = cpu_subsys_match,
@@ -413,6 +421,7 @@ int register_cpu(struct cpu *cpu, int num)
register_cpu_under_node(num, cpu_to_node(num));
dev_pm_qos_expose_latency_limit(&cpu->dev,
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+ set_cpu_enabled(num, true);
return 0;
}
@@ -494,6 +503,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
+ &dev_attr_enabled.attr,
&dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
@@ -558,7 +568,7 @@ static void __init cpu_dev_register_generic(void)
for_each_present_cpu(i) {
ret = arch_register_cpu(i);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
pr_warn("register_cpu %d failed (%d)\n", i, ret);
}
}
@@ -588,6 +598,8 @@ CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
+CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(ghostwrite);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -602,6 +614,8 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -617,6 +631,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_retbleed.attr,
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
+ &dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_ghostwrite.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 85152537dbf1..f0e4b4aba885 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -248,7 +248,7 @@ static int deferred_devs_show(struct seq_file *s, void *data)
list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
seq_printf(s, "%s\t%s", dev_name(curr->device),
- curr->device->p->deferred_probe_reason ?: "\n");
+ curr->deferred_probe_reason ?: "\n");
mutex_unlock(&deferred_probe_mutex);
@@ -313,7 +313,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
- dev_info(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
+ dev_warn(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
mutex_unlock(&deferred_probe_mutex);
fw_devlink_probing_done();
@@ -393,17 +393,17 @@ bool device_is_bound(struct device *dev)
{
return dev->p && klist_node_attached(&dev->p->knode_driver);
}
+EXPORT_SYMBOL_GPL(device_is_bound);
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
- pr_warn("%s: device %s already bound\n",
- __func__, kobject_name(&dev->kobj));
+ dev_warn(dev, "%s: device already bound\n", __func__);
return;
}
- pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
- __func__, dev_name(dev));
+ dev_dbg(dev, "driver: '%s': %s: bound to device\n", dev->driver->name,
+ __func__);
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
device_links_driver_bound(dev);
@@ -569,7 +569,7 @@ static void device_remove(struct device *dev)
dev->driver->remove(dev);
}
-static int call_driver_probe(struct device *dev, struct device_driver *drv)
+static int call_driver_probe(struct device *dev, const struct device_driver *drv)
{
int ret = 0;
@@ -587,20 +587,20 @@ static int call_driver_probe(struct device *dev, struct device_driver *drv)
break;
case -ENODEV:
case -ENXIO:
- pr_debug("%s: probe of %s rejects match %d\n",
- drv->name, dev_name(dev), ret);
+ dev_dbg(dev, "probe with driver %s rejects match %d\n",
+ drv->name, ret);
break;
default:
/* driver matched but the probe failed */
- pr_warn("%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
+ dev_err(dev, "probe with driver %s failed with error %d\n",
+ drv->name, ret);
break;
}
return ret;
}
-static int really_probe(struct device *dev, struct device_driver *drv)
+static int really_probe(struct device *dev, const struct device_driver *drv)
{
bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
!drv->suppress_bind_attrs;
@@ -620,8 +620,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
if (link_ret == -EPROBE_DEFER)
return link_ret;
- pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
- drv->bus->name, __func__, drv->name, dev_name(dev));
+ dev_dbg(dev, "bus: '%s': %s: probing driver %s with device\n",
+ drv->bus->name, __func__, drv->name);
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
ret = -EBUSY;
@@ -629,7 +629,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
}
re_probe:
- dev->driver = drv;
+ // FIXME - this cast should not be needed "soon"
+ dev->driver = (struct device_driver *)drv;
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -644,8 +645,7 @@ re_probe:
ret = driver_sysfs_add(dev);
if (ret) {
- pr_err("%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
+ dev_err(dev, "%s: driver_sysfs_add failed\n", __func__);
goto sysfs_failed;
}
@@ -706,8 +706,8 @@ re_probe:
dev->pm_domain->sync(dev);
driver_bound(dev);
- pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: bound device to driver %s\n",
+ drv->bus->name, __func__, drv->name);
goto done;
dev_sysfs_state_synced_failed:
@@ -729,7 +729,7 @@ done:
/*
* For initcall_debug, show the driver probe time.
*/
-static int really_probe_debug(struct device *dev, struct device_driver *drv)
+static int really_probe_debug(struct device *dev, const struct device_driver *drv)
{
ktime_t calltime, rettime;
int ret;
@@ -776,7 +776,7 @@ void wait_for_device_probe(void)
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
-static int __driver_probe_device(struct device_driver *drv, struct device *dev)
+static int __driver_probe_device(const struct device_driver *drv, struct device *dev)
{
int ret = 0;
@@ -786,8 +786,8 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev)
return -EBUSY;
dev->can_match = true;
- pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: matched device with driver %s\n",
+ drv->bus->name, __func__, drv->name);
pm_runtime_get_suppliers(dev);
if (dev->parent)
@@ -821,7 +821,7 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev)
*
* If the device has a parent, runtime-resume the parent before driver probing.
*/
-static int driver_probe_device(struct device_driver *drv, struct device *dev)
+static int driver_probe_device(const struct device_driver *drv, struct device *dev)
{
int trigger_count = atomic_read(&deferred_trigger_count);
int ret;
@@ -865,7 +865,7 @@ static int __init save_async_options(char *buf)
}
__setup("driver_async_probe=", save_async_options);
-static bool driver_allows_async_probing(struct device_driver *drv)
+static bool driver_allows_async_probing(const struct device_driver *drv)
{
switch (drv->probe_type) {
case PROBE_PREFER_ASYNCHRONOUS:
@@ -1119,7 +1119,7 @@ static void __device_driver_unlock(struct device *dev, struct device *parent)
* Manually attach driver to a device. Will acquire both @dev lock and
* @dev->parent lock if needed. Returns 0 on success, -ERR on failure.
*/
-int device_driver_attach(struct device_driver *drv, struct device *dev)
+int device_driver_attach(const struct device_driver *drv, struct device *dev)
{
int ret;
@@ -1139,7 +1139,7 @@ EXPORT_SYMBOL_GPL(device_driver_attach);
static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
{
struct device *dev = _dev;
- struct device_driver *drv;
+ const struct device_driver *drv;
int ret;
__device_driver_lock(dev, dev->parent);
@@ -1155,7 +1155,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
static int __driver_attach(struct device *dev, void *data)
{
- struct device_driver *drv = data;
+ const struct device_driver *drv = data;
bool async = false;
int ret;
@@ -1228,9 +1228,10 @@ static int __driver_attach(struct device *dev, void *data)
* returns 0 and the @dev->driver is set, we've found a
* compatible pair.
*/
-int driver_attach(struct device_driver *drv)
+int driver_attach(const struct device_driver *drv)
{
- return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
+ /* The (void *) will be put back to const * in __driver_attach() */
+ return bus_for_each_dev(drv->bus, NULL, (void *)drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
@@ -1286,7 +1287,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
}
void device_release_driver_internal(struct device *dev,
- struct device_driver *drv,
+ const struct device_driver *drv,
struct device *parent)
{
__device_driver_lock(dev, parent);
@@ -1335,7 +1336,7 @@ void device_driver_detach(struct device *dev)
* driver_detach - detach driver from all devices it controls.
* @drv: driver.
*/
-void driver_detach(struct device_driver *drv)
+void driver_detach(const struct device_driver *drv)
{
struct device_private *dev_prv;
struct device *dev;
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 7e2d1f0d903a..64840e5d5fcc 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -18,9 +18,6 @@ static struct class devcd_class;
/* global disable flag, for security purposes */
static bool devcd_disabled;
-/* if data isn't read by userspace after 5 minutes then delete it */
-#define DEVCD_TIMEOUT (HZ * 60 * 5)
-
struct devcd_entry {
struct device devcd_dev;
void *data;
@@ -109,7 +106,7 @@ static void devcd_del(struct work_struct *wk)
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -119,7 +116,7 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -135,19 +132,15 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute devcd_attr_data = {
- .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
- .size = 0,
- .read = devcd_data_read,
- .write = devcd_data_write,
-};
+static const struct bin_attribute devcd_attr_data =
+ __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0);
-static struct bin_attribute *devcd_dev_bin_attrs[] = {
+static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
&devcd_attr_data, NULL,
};
static const struct attribute_group devcd_dev_group = {
- .bin_attrs = devcd_dev_bin_attrs,
+ .bin_attrs_new = devcd_dev_bin_attrs,
};
static const struct attribute_group *devcd_dev_groups[] = {
@@ -189,9 +182,9 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* mutex_lock(&devcd->mutex);
*
*
- * In the above diagram, It looks like disabled_store() would be racing with parallely
+ * In the above diagram, it looks like disabled_store() would be racing with parallelly
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory  after dropping its last reference with
+ * is called after kfree of devcd memory after dropping its last reference with
* put_device(). However, this will not happen as fn(dev, data) runs
* with its own reference to the device via klist_node, so it is not its last
* reference. So, the above situation would not occur.
@@ -288,6 +281,8 @@ static void devcd_free_sgtable(void *data)
* @offset: start copy from @offset@ bytes from the head of the data
* in the given scatterlist
* @data_len: the length of the data in the sg_table
+ *
+ * Returns: the number of bytes copied
*/
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
size_t buf_len, void *data,
@@ -305,7 +300,31 @@ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
}
/**
- * dev_coredumpm - create device coredump with read/free methods
+ * dev_coredump_put - remove device coredump
+ * @dev: the struct device for the crashed device
+ *
+ * dev_coredump_put() removes the coredump for a given device from the file
+ * system, if one exists, and frees its associated data; otherwise it does
+ * nothing.
+ *
+ * It is useful for modules that do not want to keep the coredump
+ * available after they are unloaded.
+ */
+void dev_coredump_put(struct device *dev)
+{
+ struct device *existing;
+
+ existing = class_find_device(&devcd_class, NULL, dev,
+ devcd_match_failing);
+ if (existing) {
+ devcd_free(existing, NULL);
+ put_device(existing);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_coredump_put);
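A sketch of the intended call site: a driver dropping any pending dump on unload so stale data does not outlive the module (the remove callback shape is illustrative):

static void example_remove(struct platform_device *pdev)
{
	/* Delete any coredump still sitting in sysfs for this device. */
	dev_coredump_put(&pdev->dev);
}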
+
+/**
+ * dev_coredumpm_timeout - create device coredump with read/free methods with a
+ * custom timeout.
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
* @data: data cookie for the @read/@free functions
@@ -313,17 +332,20 @@ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
* @gfp: allocation flags
* @read: function to read from the given buffer
* @free: function to free the given buffer
+ * @timeout: time in jiffies to remove coredump
*
* Creates a new device coredump for the given device. If a previous one hasn't
* been read yet, the new coredump is discarded. The data lifetime is determined
* by the device coredump framework and when it is no longer needed the @free
* function will be called to free the data.
*/
-void dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data))
+void dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout)
{
static atomic_t devcd_count = ATOMIC_INIT(0);
struct devcd_entry *devcd;
@@ -380,7 +402,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+ schedule_delayed_work(&devcd->del_wk, timeout);
mutex_unlock(&devcd->mutex);
return;
put_device:
@@ -391,7 +413,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
free:
free(data);
}
-EXPORT_SYMBOL_GPL(dev_coredumpm);
+EXPORT_SYMBOL_GPL(dev_coredumpm_timeout);
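Existing dev_coredumpm() callers keep the old five-minute behaviour, presumably via a header wrapper that passes the default timeout; callers that need the dump to linger can now say so explicitly. A sketch, where example_read/example_free stand in for the caller's read/free callbacks:

static void example_crash_handler(struct device *dev, void *data, size_t len)
{
	/* Keep this dump for an hour instead of the default five minutes. */
	dev_coredumpm_timeout(dev, THIS_MODULE, data, len, GFP_KERNEL,
			      example_read, example_free,
			      msecs_to_jiffies(60 * 60 * 1000));
}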
/**
* dev_coredumpsg - create device coredump that uses scatterlist as data
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 3df0025d12aa..93e7779ef21e 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -85,7 +85,7 @@ static void group_close_release(struct device *dev, void *res)
/* noop */
}
-static struct devres_group * node_to_group(struct devres_node *node)
+static struct devres_group *node_to_group(struct devres_node *node)
{
if (node->release == &group_open_release)
return container_of(node, struct devres_group, node[0]);
@@ -107,8 +107,8 @@ static bool check_dr_size(size_t size, size_t *tot_size)
return true;
}
-static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp, int nid)
+static __always_inline struct devres *alloc_dr(dr_release_t release,
+ size_t size, gfp_t gfp, int nid)
{
size_t tot_size;
struct devres *dr;
@@ -283,8 +283,8 @@ static struct devres *find_dr(struct device *dev, dr_release_t release,
* RETURNS:
* Pointer to found devres, NULL if not found.
*/
-void * devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -313,8 +313,8 @@ EXPORT_SYMBOL_GPL(devres_find);
* RETURNS:
* Pointer to found or added devres.
*/
-void * devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data)
+void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data)
{
struct devres *new_dr = container_of(new_res, struct devres, data);
struct devres *dr;
@@ -349,8 +349,8 @@ EXPORT_SYMBOL_GPL(devres_get);
* RETURNS:
* Pointer to removed devres on success, NULL if not found.
*/
-void * devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -549,7 +549,7 @@ int devres_release_all(struct device *dev)
* RETURNS:
* ID of the new group, NULL on failure.
*/
-void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
struct devres_group *grp;
unsigned long flags;
@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
grp->id = grp;
if (id)
grp->id = id;
+ grp->color = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
@@ -576,7 +577,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
EXPORT_SYMBOL_GPL(devres_open_group);
/* Find devres group with ID @id. If @id is NULL, look for the latest. */
-static struct devres_group * find_group(struct device *dev, void *id)
+static struct devres_group *find_group(struct device *dev, void *id)
{
struct devres_node *node;
@@ -749,25 +750,38 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
EXPORT_SYMBOL_GPL(__devm_add_action);
/**
- * devm_remove_action() - removes previously added custom action
+ * devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
+ *
+ * In contrast to devm_remove_action(), this function does not WARN() if no
+ * entry was found.
+ *
+ * This should only be used if the action is contained in an object with
+ * independent lifetime management, e.g. the Devres rust abstraction.
+ *
+ * Causing the warning from regular driver code most likely indicates an abuse
+ * of the devres API.
+ *
+ * Returns: 0 on success, -ENOENT if no entry was found.
*/
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+int devm_remove_action_nowarn(struct device *dev,
+ void (*action)(void *),
+ void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
- WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
- &devres));
+ return devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres);
}
-EXPORT_SYMBOL_GPL(devm_remove_action);
+EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
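The WARN()-ing devm_remove_action() presumably becomes a thin inline over the new function, along these lines (a sketch of the companion header change, not part of this hunk):

static inline void devm_remove_action(struct device *dev,
				      void (*action)(void *), void *data)
{
	/* Keep the historical behaviour: complain if the entry is missing. */
	WARN_ON(devm_remove_action_nowarn(dev, action, data));
}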
/**
* devm_release_action() - release previously added custom action
@@ -896,9 +910,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
/*
* Otherwise: allocate new, larger chunk. We need to allocate before
* taking the lock as most probably the caller uses GFP_KERNEL.
+ * alloc_dr() will call check_dr_size() to reserve extra memory
+ * for struct devres automatically, so the user-requested size @new_size
+ * is delivered to it directly, just as devm_kmalloc() does.
*/
new_dr = alloc_dr(devm_kmalloc_release,
- total_new_size, gfp, dev_to_node(dev));
+ new_size, gfp, dev_to_node(dev));
if (!new_dr)
return NULL;
@@ -1222,7 +1239,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
*/
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
- WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
- (__force void *)pdata));
+ /*
+ * Use devres_release() to prevent memory leakage as
+ * devm_free_pages() does.
+ */
+ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
+ (void *)(__force unsigned long)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index c8436c26ed6a..8ab010ddf709 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override);
* Iterate over the @drv's list of devices calling @fn for each one.
*/
int driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct klist_iter i;
struct device *dev;
@@ -148,9 +148,9 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
-struct device *driver_find_device(struct device_driver *drv,
+struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct klist_iter i;
struct device *dev;
@@ -160,9 +160,12 @@ struct device *driver_find_device(struct device_driver *drv,
klist_iter_init_node(&drv->p->klist_devices, &i,
(start ? &start->p->knode_driver : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
return dev;
}
@@ -173,7 +176,7 @@ EXPORT_SYMBOL_GPL(driver_find_device);
* @drv: driver.
* @attr: driver attribute descriptor.
*/
-int driver_create_file(struct device_driver *drv,
+int driver_create_file(const struct device_driver *drv,
const struct driver_attribute *attr)
{
int error;
@@ -191,7 +194,7 @@ EXPORT_SYMBOL_GPL(driver_create_file);
* @drv: driver.
* @attr: driver attribute descriptor.
*/
-void driver_remove_file(struct device_driver *drv,
+void driver_remove_file(const struct device_driver *drv,
const struct driver_attribute *attr)
{
if (drv)
@@ -199,13 +202,13 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-int driver_add_groups(struct device_driver *drv,
+int driver_add_groups(const struct device_driver *drv,
const struct attribute_group **groups)
{
return sysfs_create_groups(&drv->p->kobj, groups);
}
-void driver_remove_groups(struct device_driver *drv,
+void driver_remove_groups(const struct device_driver *drv,
const struct attribute_group **groups)
{
sysfs_remove_groups(&drv->p->kobj, groups);
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
new file mode 100644
index 000000000000..531e9d789ee0
--- /dev/null
+++ b/drivers/base/faux.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to have to deal with a bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/container_of.h>
+#include <linux/device/faux.h>
+#include "base.h"
+
+/*
+ * Internal wrapper structure so we can hold a pointer to the
+ * faux_device_ops for this device.
+ */
+struct faux_object {
+ struct faux_device faux_dev;
+ const struct faux_device_ops *faux_ops;
+};
+#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
+
+static struct device faux_bus_root = {
+ .init_name = "faux",
+};
+
+static int faux_match(struct device *dev, const struct device_driver *drv)
+{
+ /* Match always succeeds, we only have one driver */
+ return 1;
+}
+
+static int faux_probe(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ int ret = 0;
+
+ if (faux_ops && faux_ops->probe)
+ ret = faux_ops->probe(faux_dev);
+
+ return ret;
+}
+
+static void faux_remove(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+
+ if (faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+}
+
+static const struct bus_type faux_bus_type = {
+ .name = "faux",
+ .match = faux_match,
+ .probe = faux_probe,
+ .remove = faux_remove,
+};
+
+static struct device_driver faux_driver = {
+ .name = "faux_driver",
+ .bus = &faux_bus_type,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+};
+
+static void faux_device_release(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+
+ kfree(faux_obj);
+}
+
+/**
+ * faux_device_create_with_groups - Create and register with the driver
+ * core a faux device and populate the device with an initial
+ * set of sysfs attributes.
+ * @name: The name of the device we are adding, must be unique for
+ * all faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ * @groups: The set of sysfs attributes that will be created for this
+ * device when it is registered with the driver core.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, the callbacks in @faux_ops will be called with the device so
+ * that the caller can do something with it at the proper time in the
+ * device's lifecycle.
+ *
+ * Note, when this function is called, the functions specified in struct
+ * faux_ops can be called before the function returns, so be prepared for
+ * everything to be properly initialized before that point in time.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups)
+{
+ struct faux_object *faux_obj;
+ struct faux_device *faux_dev;
+ struct device *dev;
+ int ret;
+
+ faux_obj = kzalloc(sizeof(*faux_obj), GFP_KERNEL);
+ if (!faux_obj)
+ return NULL;
+
+ /* Save off the callbacks so we can use them in the future */
+ faux_obj->faux_ops = faux_ops;
+
+ /* Initialize the device portion and register it with the driver core */
+ faux_dev = &faux_obj->faux_dev;
+ dev = &faux_dev->dev;
+
+ device_initialize(dev);
+ dev->release = faux_device_release;
+ if (parent)
+ dev->parent = parent;
+ else
+ dev->parent = &faux_bus_root;
+ dev->bus = &faux_bus_type;
+ dev->groups = groups;
+ dev_set_name(dev, "%s", name);
+
+ ret = device_add(dev);
+ if (ret) {
+ pr_err("%s: device_add for faux device '%s' failed with %d\n",
+ __func__, name, ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ return faux_dev;
+}
+EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
+
+/**
+ * faux_device_create - create and register with the driver core a faux device
+ * @name: The name of the device we are adding, must be unique for all
+ * faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ *
+ * Create a new faux device and register it properly with the driver core.
+ * If present, the callbacks in @faux_ops will be invoked with the new
+ * device at the appropriate points in its lifecycle, giving the caller a
+ * chance to act on the device.
+ *
+ * Note that the callbacks in @faux_ops may run before this function
+ * returns, so everything they rely on must be fully initialized before
+ * this function is called.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops)
+{
+ return faux_device_create_with_groups(name, parent, faux_ops, NULL);
+}
+EXPORT_SYMBOL_GPL(faux_device_create);
+
+/**
+ * faux_device_destroy - destroy a faux device
+ * @faux_dev: faux device to destroy
+ *
+ * Unregisters and cleans up a device that was created with a call to
+ * faux_device_create()
+ */
+void faux_device_destroy(struct faux_device *faux_dev)
+{
+ struct device *dev;
+
+ if (!faux_dev)
+ return;
+
+ dev = &faux_dev->dev;
+
+ device_del(dev);
+
+ /* The final put_device() will clean up the memory we allocated for this device. */
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(faux_device_destroy);
+
+int __init faux_bus_init(void)
+{
+ int ret;
+
+ ret = device_register(&faux_bus_root);
+ if (ret) {
+ put_device(&faux_bus_root);
+ return ret;
+ }
+
+ ret = bus_register(&faux_bus_type);
+ if (ret)
+ goto error_bus;
+
+ ret = driver_register(&faux_driver);
+ if (ret)
+ goto error_driver;
+
+ return ret;
+
+error_driver:
+ bus_unregister(&faux_bus_type);
+
+error_bus:
+ device_unregister(&faux_bus_root);
+ return ret;
+}
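
For orientation, a minimal usage sketch of the new faux API (the my_*
names are hypothetical; the callback signatures follow faux_probe() and
faux_remove() above):

	#include <linux/device/faux.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static int my_faux_probe(struct faux_device *fdev)
	{
		dev_info(&fdev->dev, "faux device bound\n");
		return 0;
	}

	static void my_faux_remove(struct faux_device *fdev)
	{
		dev_info(&fdev->dev, "faux device going away\n");
	}

	static const struct faux_device_ops my_faux_ops = {
		.probe	= my_faux_probe,
		.remove	= my_faux_remove,
	};

	static struct faux_device *my_fdev;

	static int __init my_faux_init(void)
	{
		/* NULL parent: the device appears under the faux root in sysfs. */
		my_fdev = faux_device_create("my_faux", NULL, &my_faux_ops);
		return my_fdev ? 0 : -ENODEV;
	}
	module_init(my_faux_init);

	static void __exit my_faux_exit(void)
	{
		faux_device_destroy(my_fdev);
	}
	module_exit(my_faux_exit);

	MODULE_LICENSE("GPL");
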
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 5ca00e02fe82..a03701674265 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -37,6 +37,13 @@ config FW_LOADER_DEBUG
SHA256 checksums to the kernel log for each firmware file that is
loaded.
+config RUST_FW_LOADER_ABSTRACTIONS
+ bool "Rust Firmware Loader abstractions"
+ depends on RUST
+ depends on FW_LOADER=y
+ help
+ This enables the Rust abstractions for the firmware loader API.
+
if FW_LOADER
config FW_LOADER_PAGED_BUF
diff --git a/drivers/base/firmware_loader/builtin/main.c b/drivers/base/firmware_loader/builtin/main.c
index a065c3150897..d36befebb1b9 100644
--- a/drivers/base/firmware_loader/builtin/main.c
+++ b/drivers/base/firmware_loader/builtin/main.c
@@ -61,7 +61,7 @@ bool firmware_request_builtin(struct firmware *fw, const char *name)
return false;
}
-EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, "TEST_FIRMWARE");
/**
* firmware_request_builtin_buf() - load builtin firmware into optional buffer
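
The export above now passes the symbol namespace as a string literal; a
consumer imports it the same way. A minimal sketch of an importer (not
part of this patch):

	#include <linux/module.h>

	/* Required to link against firmware_request_builtin(): */
	MODULE_IMPORT_NS("TEST_FIRMWARE");
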
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 8432ab2c3b3c..c8afc501a8a4 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -22,10 +22,10 @@ struct firmware_fallback_config fw_fallback_config = {
.loading_timeout = 60,
.old_timeout = 60,
};
-EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE");
#ifdef CONFIG_SYSCTL
-static struct ctl_table firmware_config_table[] = {
+static const struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
@@ -56,13 +56,13 @@ int register_firmware_config_sysctl(void)
return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
void unregister_firmware_config_sysctl(void)
{
unregister_sysctl_table(firmware_config_sysct_table_header);
firmware_config_sysct_table_header = NULL;
}
-EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index ea28102d421e..cb0912ea3e62 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -551,12 +551,16 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
file_size_ptr,
READING_FIRMWARE);
if (rc < 0) {
- if (rc != -ENOENT)
- dev_warn(device, "loading %s failed with error %d\n",
- path, rc);
- else
- dev_dbg(device, "loading %s failed for no such file or directory.\n",
- path);
+ if (!(fw_priv->opt_flags & FW_OPT_NO_WARN)) {
+ if (rc != -ENOENT)
+ dev_warn(device,
+ "loading %s failed with error %d\n",
+ path, rc);
+ else
+ dev_dbg(device,
+ "loading %s failed for no such file or directory.\n",
+ path);
+ }
continue;
}
size = rc;
@@ -825,19 +829,18 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st
shash->tfm = alg;
if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
- goto out_shash;
+ goto out_free;
for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
outbuf[SHA256_BLOCK_SIZE] = 0;
dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
-out_shash:
- crypto_free_shash(alg);
out_free:
kfree(shash);
kfree(outbuf);
kfree(sha256buf);
+ crypto_free_shash(alg);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
@@ -845,6 +848,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
{}
#endif
+/*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from device-supplied
+ * strings, and we don't want some device to be able to tell us "I would like to
+ * be sent my firmware from ../../../etc/shadow, please".
+ *
+ * Search for ".." surrounded by either '/' or start/end of string.
+ *
+ * This intentionally only looks at the firmware name, not at the firmware base
+ * directory or at symlink contents.
+ */
+static bool name_contains_dotdot(const char *name)
+{
+ size_t name_len = strlen(name);
+
+ return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
+ strstr(name, "/../") != NULL ||
+ (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
+}
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -865,6 +888,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+ if (name_contains_dotdot(name)) {
+ dev_warn(device,
+ "Firmware load for '%s' refused, path contains '..' component\n",
+ name);
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = _request_firmware_prepare(&fw, name, device, buf, size,
offset, opt_flags);
if (ret <= 0) /* error or already assigned */
@@ -942,6 +973,8 @@ out:
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
+ * It must not contain any ".." path components - "foo/bar..bin" is
+ * allowed, but "foo/../bar.bin" is not.
*
* Caller must hold the reference count of @device.
*
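
To make the ".." rule concrete, the verdicts the helper gives for a few
inputs (illustrative only, derived from name_contains_dotdot() above):

	name_contains_dotdot("foo/bar..bin");  /* false: ".." is not a full component */
	name_contains_dotdot("..");            /* true:  the whole name is ".." */
	name_contains_dotdot("../fw.bin");     /* true:  leading "../" */
	name_contains_dotdot("foo/../fw.bin"); /* true:  "/../" in the middle */
	name_contains_dotdot("foo/..");        /* true:  trailing "/.." */
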
@@ -1041,8 +1074,8 @@ EXPORT_SYMBOL_GPL(firmware_request_platform);
/**
* firmware_request_cache() - cache firmware for suspend so resume can use it
- * @name: name of firmware file
* @device: device for which firmware should be cached for
+ * @name: name of firmware file
*
* There are some devices with an optimization that enables the device to not
* require loading firmware on system reboot. This optimization may still
@@ -1168,34 +1201,11 @@ static void request_firmware_work_func(struct work_struct *work)
kfree(fw_work);
}
-/**
- * request_firmware_nowait() - asynchronous version of request_firmware
- * @module: module requesting the firmware
- * @uevent: sends uevent to copy the firmware image if this flag
- * is non-zero else the firmware copy must be done manually.
- * @name: name of firmware file
- * @device: device for which firmware is being loaded
- * @gfp: allocation flags
- * @context: will be passed over to @cont, and
- * @fw may be %NULL if firmware request fails.
- * @cont: function will be called asynchronously when the firmware
- * request is over.
- *
- * Caller must hold the reference count of @device.
- *
- * Asynchronous variant of request_firmware() for user contexts:
- * - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
- *
- * - can't sleep at all if @gfp is GFP_ATOMIC.
- **/
-int
-request_firmware_nowait(
+
+static int _request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
- void (*cont)(const struct firmware *fw, void *context))
+ void (*cont)(const struct firmware *fw, void *context), bool nowarn)
{
struct firmware_work *fw_work;
@@ -1213,7 +1223,8 @@ request_firmware_nowait(
fw_work->context = context;
fw_work->cont = cont;
fw_work->opt_flags = FW_OPT_NOWAIT |
- (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
+ (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER) |
+ (nowarn ? FW_OPT_NO_WARN : 0);
if (!uevent && fw_cache_is_setup(device, name)) {
kfree_const(fw_work->name);
@@ -1232,8 +1243,66 @@ request_firmware_nowait(
schedule_work(&fw_work->work);
return 0;
}
+
+/**
+ * request_firmware_nowait() - asynchronous version of request_firmware
+ * @module: module requesting the firmware
+ * @uevent: sends uevent to copy the firmware image if this flag
+ * is non-zero else the firmware copy must be done manually.
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ * @fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ * request is over.
+ *
+ * Caller must hold the reference count of @device.
+ *
+ * Asynchronous variant of request_firmware() for user contexts:
+ * - sleep for as small periods as possible since it may
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
+ *
+ * - can't sleep at all if @gfp is GFP_ATOMIC.
+ **/
+int request_firmware_nowait(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return _request_firmware_nowait(module, uevent, name, device, gfp,
+ context, cont, false);
+}
EXPORT_SYMBOL(request_firmware_nowait);
+/**
+ * firmware_request_nowait_nowarn() - async version of firmware_request_nowarn()
+ * @module: module requesting the firmware
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ * @fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ * request is over.
+ *
+ * Similar in function to request_firmware_nowait(), but doesn't print a warning
+ * when the firmware file could not be found and always sends a uevent to copy
+ * the firmware image.
+ */
+int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return _request_firmware_nowait(module, FW_ACTION_UEVENT, name, device,
+ gfp, context, cont, true);
+}
+EXPORT_SYMBOL_GPL(firmware_request_nowait_nowarn);
+
#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
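
A sketch of a driver using the new nowarn async entry point (the my_*
names are hypothetical; the signature is the one exported above):

	#include <linux/firmware.h>

	static void my_fw_done(const struct firmware *fw, void *context)
	{
		struct device *dev = context;

		if (!fw) {
			/* Optional firmware was absent; no warning was logged. */
			dev_dbg(dev, "optional firmware not found\n");
			return;
		}
		/* ... consume fw->data / fw->size ... */
		release_firmware(fw);
	}

	static int my_load_optional_fw(struct device *dev)
	{
		return firmware_request_nowait_nowarn(THIS_MODULE,
						      "vendor/optional.bin", dev,
						      GFP_KERNEL, dev, my_fw_done);
	}
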
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index c9c93b47d9a5..d254ceb56d84 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -259,7 +259,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
}
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -316,7 +316,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -356,11 +356,11 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data = {
+static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
+ .read_new = firmware_data_read,
+ .write_new = firmware_data_write,
};
static struct attribute *fw_dev_attrs[] = {
@@ -374,14 +374,14 @@ static struct attribute *fw_dev_attrs[] = {
NULL
};
-static struct bin_attribute *fw_dev_bin_attrs[] = {
+static const struct bin_attribute *const fw_dev_bin_attrs[] = {
&firmware_attr_data,
NULL
};
static const struct attribute_group fw_dev_attr_group = {
.attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
+ .bin_attrs_new = fw_dev_bin_attrs,
#ifdef CONFIG_FW_UPLOAD
.is_visible = fw_upload_is_visible,
#endif
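
The conversion above follows the tree-wide move to const struct
bin_attribute via the read_new/write_new and bin_attrs_new fields. The
resulting declaration pattern, as a generic sketch (the foo_* names are
hypothetical):

	static ssize_t foo_read(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
	{
		return 0; /* fill @buf from device state */
	}

	static const struct bin_attribute foo_attr = {
		.attr		= { .name = "foo", .mode = 0444 },
		.read_new	= foo_read,
	};

	static const struct bin_attribute *const foo_bin_attrs[] = {
		&foo_attr,
		NULL
	};

	static const struct attribute_group foo_group = {
		.bin_attrs_new	= foo_bin_attrs,
	};
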
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 2060add8ef81..1cabea544a40 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -6,7 +6,7 @@
#include "firmware.h"
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+MODULE_IMPORT_NS("FIRMWARE_LOADER_PRIVATE");
extern struct firmware_fallback_config fw_fallback_config;
extern struct device_attribute dev_attr_loading;
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c4954835128c..9d2b06d65dfc 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ faux_bus_init();
of_core_init();
platform_bus_init();
auxiliary_bus_init();
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index e23d0b49a793..bfd9215c9070 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -23,7 +23,7 @@ struct isa_dev {
#define to_isa_dev(x) container_of((x), struct isa_dev, dev)
-static int isa_bus_match(struct device *dev, struct device_driver *driver)
+static int isa_bus_match(struct device *dev, const struct device_driver *driver)
{
struct isa_driver *isa_driver = to_isa_driver(driver);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 14f964a7719b..348c5dbbfa68 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -188,6 +188,7 @@ static int memory_block_online(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
+ struct memory_notify arg;
struct zone *zone;
int ret;
@@ -207,9 +208,19 @@ static int memory_block_online(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
+ arg.altmap_start_pfn = start_pfn;
+ arg.altmap_nr_pages = nr_vmemmap_pages;
+ arg.start_pfn = start_pfn + nr_vmemmap_pages;
+ arg.nr_pages = nr_pages - nr_vmemmap_pages;
mem_hotplug_begin();
+ ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ goto out_notifier;
+
if (nr_vmemmap_pages) {
- ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
+ zone, mem->altmap->inaccessible);
if (ret)
goto out;
}
@@ -231,7 +242,11 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
+ mem_hotplug_done();
+ return ret;
out:
+ memory_notify(MEM_FINISH_OFFLINE, &arg);
+out_notifier:
mem_hotplug_done();
return ret;
}
@@ -244,6 +259,7 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
+ struct memory_notify arg;
int ret;
if (!mem->zone)
@@ -275,6 +291,11 @@ static int memory_block_offline(struct memory_block *mem)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
+ arg.altmap_start_pfn = start_pfn;
+ arg.altmap_nr_pages = nr_vmemmap_pages;
+ arg.start_pfn = start_pfn + nr_vmemmap_pages;
+ arg.nr_pages = nr_pages - nr_vmemmap_pages;
+ memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
mem_hotplug_done();
return ret;
@@ -491,7 +512,7 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[mhp_default_online_type]);
+ online_type_to_str[mhp_get_default_online_type()]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
@@ -503,7 +524,7 @@ static ssize_t auto_online_blocks_store(struct device *dev,
if (online_type < 0)
return -EINVAL;
- mhp_default_online_type = online_type;
+ mhp_set_default_online_type(online_type);
return count;
}
@@ -514,7 +535,7 @@ static DEVICE_ATTR_RW(auto_online_blocks);
static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%d\n", crash_hotplug_memory_support());
+ return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
static DEVICE_ATTR_RO(crash_hotplug);
#endif
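
The new MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifications split a
memory block into its vmemmap ("altmap") pages and the remaining pages.
A subscriber might look like this (the callback name is hypothetical;
the fields are the ones filled in by the hunks above):

	#include <linux/memory.h>
	#include <linux/notifier.h>

	static int my_memory_callback(struct notifier_block *nb,
				      unsigned long action, void *data)
	{
		struct memory_notify *arg = data;

		switch (action) {
		case MEM_PREPARE_ONLINE:
			/* arg->altmap_start_pfn/altmap_nr_pages cover the
			 * vmemmap; arg->start_pfn/nr_pages the pages being
			 * onlined. */
			break;
		case MEM_FINISH_OFFLINE:
			/* Counterpart: the block went (or failed to go) away. */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_memory_nb = {
		.notifier_call = my_memory_callback,
	};

	/* registered with register_memory_notifier(&my_memory_nb) */
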
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 46ad4d636731..5bc71bea883a 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -9,7 +9,7 @@
#include <linux/string.h>
#include "base.h"
-static char *make_driver_name(struct device_driver *drv)
+static char *make_driver_name(const struct device_driver *drv)
{
char *driver_name;
@@ -30,14 +30,14 @@ static void module_create_drivers_dir(struct module_kobject *mk)
mutex_unlock(&drivers_dir_mutex);
}
-void module_add_driver(struct module *mod, struct device_driver *drv)
+int module_add_driver(struct module *mod, const struct device_driver *drv)
{
char *driver_name;
- int no_warn;
struct module_kobject *mk = NULL;
+ int ret;
if (!drv)
- return;
+ return 0;
if (mod)
mk = &mod->mkobj;
@@ -56,20 +56,44 @@ void module_add_driver(struct module *mod, struct device_driver *drv)
}
if (!mk)
- return;
+ return 0;
+
+ ret = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
+ if (ret)
+ return ret;
- /* Don't check return codes; these calls are idempotent */
- no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
driver_name = make_driver_name(drv);
- if (driver_name) {
- module_create_drivers_dir(mk);
- no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
- driver_name);
- kfree(driver_name);
+ if (!driver_name) {
+ ret = -ENOMEM;
+ goto out_remove_kobj;
}
+
+ module_create_drivers_dir(mk);
+ if (!mk->drivers_dir) {
+ ret = -EINVAL;
+ goto out_free_driver_name;
+ }
+
+ ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
+ if (ret)
+ goto out_remove_drivers_dir;
+
+ kfree(driver_name);
+
+ return 0;
+
+out_remove_drivers_dir:
+ sysfs_remove_link(mk->drivers_dir, driver_name);
+
+out_free_driver_name:
+ kfree(driver_name);
+
+out_remove_kobj:
+ sysfs_remove_link(&drv->p->kobj, "module");
+ return ret;
}
-void module_remove_driver(struct device_driver *drv)
+void module_remove_driver(const struct device_driver *drv)
{
struct module_kobject *mk = NULL;
char *driver_name;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1c05640461dd..0ea653fa3433 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,7 +27,7 @@ static const struct bus_type node_subsys = {
};
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -45,10 +45,10 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -66,7 +66,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
/**
* struct node_access_nodes - Access class device to hold user visible
@@ -126,7 +126,7 @@ static void node_access_release(struct device *dev)
}
static struct node_access_nodes *node_init_node_access(struct node *node,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node_access_nodes *access_node;
struct device *dev;
@@ -191,7 +191,7 @@ static struct attribute *access_attrs[] = {
* @access: The access class the for the given attributes
*/
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node_access_nodes *c;
struct node *node;
@@ -215,6 +215,7 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
}
}
}
+EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
* struct node_cache_info - Internal tracking for memory node caches
@@ -577,7 +578,7 @@ static struct attribute *node_dev_attrs[] = {
NULL
};
-static struct bin_attribute *node_dev_bin_attrs[] = {
+static const struct bin_attribute *node_dev_bin_attrs[] = {
&bin_attr_cpumap,
&bin_attr_cpulist,
NULL
@@ -585,7 +586,7 @@ static struct bin_attribute *node_dev_bin_attrs[] = {
static const struct attribute_group node_dev_group = {
.attrs = node_dev_attrs,
- .bin_attrs = node_dev_bin_attrs
+ .bin_attrs_new = node_dev_bin_attrs,
};
static const struct attribute_group *node_dev_groups[] = {
@@ -689,7 +690,7 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
*/
int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node *init_node, *targ_node;
struct node_access_nodes *initiator, *target;
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 951819e71b4a..5db06e825c94 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -13,13 +13,11 @@
bool dev_add_physical_location(struct device *dev)
{
struct acpi_pld_info *pld;
- acpi_status status;
if (!has_acpi_companion(dev))
return false;
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
dev->physical_location =
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index f37ad34c80ec..0e60dd650b5e 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -4,349 +4,96 @@
*
* Copyright (C) 2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
+ * Copyright (C) 2022 Linutronix GmbH
*/
#include <linux/device.h>
-#include <linux/idr.h>
-#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
-#include <linux/slab.h>
-#define DEV_ID_SHIFT 21
-#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
-
-/*
- * Internal data structure containing a (made up, but unique) devid
- * and the callback to write the MSI message.
- */
-struct platform_msi_priv_data {
- struct device *dev;
- void *host_data;
- msi_alloc_info_t arg;
- irq_write_msi_msg_t write_msg;
- int devid;
-};
-
-/* The devid allocator */
-static DEFINE_IDA(platform_msi_devid_ida);
-
-#ifdef GENERIC_MSI_DOMAIN_OPS
/*
- * Convert an msi_desc to a globaly unique identifier (per-device
- * devid + msi_desc position in the msi_list).
+ * This indirection can go when platform_device_msi_init_and_alloc_irqs()
+ * is switched to a proper irq_chip::irq_write_msi_msg() callback. Keep it
+ * simple for now.
*/
-static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- u32 devid = desc->dev->msi.data->platform_data->devid;
+ irq_write_msi_msg_t cb = d->chip_data;
- return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
+ cb(irq_data_get_msi_desc(d), msg);
}
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
- arg->hwirq = platform_msi_calc_hwirq(desc);
-}
-
-static int platform_msi_init(struct irq_domain *domain,
- struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
-{
- return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- info->chip, info->chip_data);
-}
-
-static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
-{
- arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
+ arg->hwirq = desc->msi_index;
}
-#else
-#define platform_msi_set_desc NULL
-#define platform_msi_init NULL
-#define platform_msi_set_proxy_dev(x) do {} while(0)
-#endif
-static void platform_msi_update_dom_ops(struct msi_domain_info *info)
-{
- struct msi_domain_ops *ops = info->ops;
-
- BUG_ON(!ops);
-
- if (ops->msi_init == NULL)
- ops->msi_init = platform_msi_init;
- if (ops->set_desc == NULL)
- ops->set_desc = platform_msi_set_desc;
-}
-
-static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct msi_desc *desc = irq_data_get_msi_desc(data);
+static const struct msi_domain_template platform_msi_template = {
+ .chip = {
+ .name = "pMSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_write_msi_msg = platform_msi_write_msi_msg,
+ /* The rest is filled in by the platform MSI parent */
+ },
- desc->dev->msi.data->platform_data->write_msg(desc, msg);
-}
-
-static void platform_msi_update_chip_ops(struct msi_domain_info *info)
-{
- struct irq_chip *chip = info->chip;
+ .ops = {
+ .set_desc = platform_msi_set_desc,
+ },
- BUG_ON(!chip);
- if (!chip->irq_mask)
- chip->irq_mask = irq_chip_mask_parent;
- if (!chip->irq_unmask)
- chip->irq_unmask = irq_chip_unmask_parent;
- if (!chip->irq_eoi)
- chip->irq_eoi = irq_chip_eoi_parent;
- if (!chip->irq_set_affinity)
- chip->irq_set_affinity = msi_domain_set_affinity;
- if (!chip->irq_write_msi_msg)
- chip->irq_write_msi_msg = platform_msi_write_msg;
- if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
- !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
- info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
-}
-
-/**
- * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Updates the domain and chip ops and creates a platform MSI
- * interrupt domain.
- *
- * Returns:
- * A domain pointer or NULL in case of failure.
- */
-struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent)
-{
- struct irq_domain *domain;
-
- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
- platform_msi_update_dom_ops(info);
- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
- platform_msi_update_chip_ops(info);
- info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
- MSI_FLAG_FREE_MSI_DESCS;
-
- domain = msi_create_irq_domain(fwnode, info, parent);
- if (domain)
- irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
-
- return domain;
-}
-EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
-
-static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
-{
- struct platform_msi_priv_data *datap;
- int err;
-
- /*
- * Limit the number of interrupts to 2048 per device. Should we
- * need to bump this up, DEV_ID_SHIFT should be adjusted
- * accordingly (which would impact the max number of MSI
- * capable devices).
- */
- if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
- return -EINVAL;
-
- if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
- dev_err(dev, "Incompatible msi_domain, giving up\n");
- return -EINVAL;
- }
-
- err = msi_setup_device_data(dev);
- if (err)
- return err;
-
- /* Already initialized? */
- if (dev->msi.data->platform_data)
- return -EBUSY;
-
- datap = kzalloc(sizeof(*datap), GFP_KERNEL);
- if (!datap)
- return -ENOMEM;
-
- datap->devid = ida_simple_get(&platform_msi_devid_ida,
- 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
- if (datap->devid < 0) {
- err = datap->devid;
- kfree(datap);
- return err;
- }
-
- datap->write_msg = write_msi_msg;
- datap->dev = dev;
- dev->msi.data->platform_data = datap;
- return 0;
-}
-
-static void platform_msi_free_priv_data(struct device *dev)
-{
- struct platform_msi_priv_data *data = dev->msi.data->platform_data;
-
- dev->msi.data->platform_data = NULL;
- ida_simple_remove(&platform_msi_devid_ida, data->devid);
- kfree(data);
-}
+ .info = {
+ .bus_token = DOMAIN_BUS_DEVICE_MSI,
+ },
+};
/**
- * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * platform_device_msi_init_and_alloc_irqs - Initialize platform device MSI
+ * and allocate interrupts for @dev
* @dev: The device for which to allocate interrupts
* @nvec: The number of interrupts to allocate
* @write_msi_msg: Callback to write an interrupt message for @dev
*
* Returns:
* Zero for success, or an error code in case of failure
- */
-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
-{
- int err;
-
- err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (err)
- return err;
-
- err = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
- if (err)
- platform_msi_free_priv_data(dev);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
-
-/**
- * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
- * @dev: The device for which to free interrupts
- */
-void platform_msi_domain_free_irqs(struct device *dev)
-{
- msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
- platform_msi_free_priv_data(dev);
-}
-EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
-
-/**
- * platform_msi_get_host_data - Query the private data associated with
- * a platform-msi domain
- * @domain: The platform-msi domain
- *
- * Return: The private data provided when calling
- * platform_msi_create_device_domain().
- */
-void *platform_msi_get_host_data(struct irq_domain *domain)
-{
- struct platform_msi_priv_data *data = domain->host_data;
-
- return data->host_data;
-}
-
-static struct lock_class_key platform_device_msi_lock_class;
-
-/**
- * __platform_msi_create_device_domain - Create a platform-msi device domain
- *
- * @dev: The device generating the MSIs
- * @nvec: The number of MSIs that need to be allocated
- * @is_tree: flag to indicate tree hierarchy
- * @write_msi_msg: Callback to write an interrupt message for @dev
- * @ops: The hierarchy domain operations to use
- * @host_data: Private data associated to this domain
*
- * Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
+ * This creates an MSI domain on @dev which has @dev->msi.domain as
+ * parent. The parent domain sets up the new domain. The domain has
+ * a fixed size of @nvec. The domain is managed by devres and will
+ * be removed when the device is removed.
*
- * This is for interrupt domains which stack on a platform-msi domain
- * created by platform_msi_create_irq_domain(). @dev->msi.domain points to
- * that platform-msi domain which is the parent for the new domain.
+ * Note: For migration purposes this falls back to the original
+ * platform_msi code until all platforms have been converted to the MSI
+ * parent model.
*/
-struct irq_domain *
-__platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- bool is_tree,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data)
+int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
- struct platform_msi_priv_data *data;
- struct irq_domain *domain;
- int err;
+ struct irq_domain *domain = dev->msi.domain;
- err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (err)
- return NULL;
+ if (!domain || !write_msi_msg)
+ return -EINVAL;
/*
- * Use a separate lock class for the MSI descriptor mutex on
- * platform MSI device domains because the descriptor mutex nests
- * into the domain mutex. See alloc/free below.
+ * @write_msi_msg is stored in the resulting msi_domain_info::data.
+ * The underlying domain creation mechanism will assign that
+ * callback to the resulting irq chip.
*/
- lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
-
- data = dev->msi.data->platform_data;
- data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
- is_tree ? 0 : nvec,
- dev->fwnode, ops, data);
- if (!domain)
- goto free_priv;
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &platform_msi_template,
+ nvec, NULL, write_msi_msg))
+ return -ENODEV;
- platform_msi_set_proxy_dev(&data->arg);
- err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
- if (err)
- goto free_domain;
-
- return domain;
-
-free_domain:
- irq_domain_remove(domain);
-free_priv:
- platform_msi_free_priv_data(dev);
- return NULL;
+ return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
}
+EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
/**
- * platform_msi_device_domain_free - Free interrupts associated with a platform-msi
- * device domain
- *
- * @domain: The platform-msi device domain
- * @virq: The base irq from which to perform the free operation
- * @nr_irqs: How many interrupts to free from @virq
- */
-void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct platform_msi_priv_data *data = domain->host_data;
-
- msi_lock_descs(data->dev);
- msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
- irq_domain_free_irqs_common(domain, virq, nr_irqs);
- msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
- msi_unlock_descs(data->dev);
-}
-
-/**
- * platform_msi_device_domain_alloc - Allocate interrupts associated with
- * a platform-msi device domain
- *
- * @domain: The platform-msi device domain
- * @virq: The base irq from which to perform the allocate operation
- * @nr_irqs: How many interrupts to allocate from @virq
- *
- * Return 0 on success, or an error code on failure. Must be called
- * with irq_domain_mutex held (which can only be done as part of a
- * top-level interrupt allocation).
+ * platform_device_msi_free_irqs_all - Free all interrupts for @dev
+ * @dev: The device for which to free interrupts
*/
-int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+void platform_device_msi_free_irqs_all(struct device *dev)
{
- struct platform_msi_priv_data *data = domain->host_data;
- struct device *dev = data->dev;
-
- return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
+ msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
}
+EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
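
A sketch of a consumer of the reworked per-device MSI API (the my_*
names are hypothetical; only the two entry points exported above are
assumed):

	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
	{
		/* Program msg->address_hi/address_lo/data into the device's
		 * MSI registers for vector desc->msi_index. */
	}

	static int my_probe(struct device *dev)
	{
		int ret;

		/* Creates the per-device MSI domain and allocates 4 vectors. */
		ret = platform_device_msi_init_and_alloc_irqs(dev, 4,
							      my_write_msi_msg);
		if (ret)
			return ret;

		/* Linux irq numbers are available via msi_get_virq(dev, i). */
		return 0;
	}

	static void my_remove(struct device *dev)
	{
		platform_device_msi_free_irqs_all(dev);
	}
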
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 10c577963418..6f2a33722c52 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -608,7 +608,7 @@ int platform_device_add_resources(struct platform_device *pdev,
struct resource *r = NULL;
if (res) {
- r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
+ r = kmemdup_array(res, num, sizeof(*r), GFP_KERNEL);
if (!r)
return -ENOMEM;
}
@@ -1122,7 +1122,7 @@ static int platform_legacy_resume(struct device *dev)
int platform_pm_suspend(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1140,7 +1140,7 @@ int platform_pm_suspend(struct device *dev)
int platform_pm_resume(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1162,7 +1162,7 @@ int platform_pm_resume(struct device *dev)
int platform_pm_freeze(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1180,7 +1180,7 @@ int platform_pm_freeze(struct device *dev)
int platform_pm_thaw(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1198,7 +1198,7 @@ int platform_pm_thaw(struct device *dev)
int platform_pm_poweroff(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1216,7 +1216,7 @@ int platform_pm_poweroff(struct device *dev)
int platform_pm_restore(struct device *dev)
{
- struct device_driver *drv = dev->driver;
+ const struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
@@ -1332,7 +1332,7 @@ __ATTRIBUTE_GROUPS(platform_dev);
* and compare it against the name of the driver. Return whether they match
* or not.
*/
-static int platform_match(struct device *dev, struct device_driver *drv)
+static int platform_match(struct device *dev, const struct device_driver *drv)
{
struct platform_device *pdev = to_platform_device(dev);
struct platform_driver *pdrv = to_platform_driver(drv);
@@ -1420,14 +1420,8 @@ static void platform_remove(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- if (drv->remove_new) {
- drv->remove_new(dev);
- } else if (drv->remove) {
- int ret = drv->remove(dev);
-
- if (ret)
- dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
- }
+ if (drv->remove)
+ drv->remove(dev);
dev_pm_domain_detach(_dev, true);
}
@@ -1480,7 +1474,7 @@ static const struct dev_pm_ops platform_dev_pm_ops = {
USE_PLATFORM_PM_SLEEP_OPS
};
-struct bus_type platform_bus_type = {
+const struct bus_type platform_bus_type = {
.name = "platform",
.dev_groups = platform_dev_groups,
.match = platform_match,
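
With the remove_new fallback gone (see the platform_remove() hunk
above), struct platform_driver's .remove is the void-returning variant.
A converted driver is shaped like this (the my_* names are
hypothetical):

	static int my_plat_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static void my_plat_remove(struct platform_device *pdev)
	{
		/* Nothing to return: failures here can only be logged. */
	}

	static struct platform_driver my_plat_driver = {
		.probe	= my_plat_probe,
		.remove	= my_plat_remove,
		.driver	= {
			.name = "my-platform-dev",
		},
	};
	module_platform_driver(my_plat_driver);
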
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 44ec20918a4d..781968a128ff 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,7 @@
#include <linux/pm_clock.h>
#include <linux/acpi.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include "power.h"
@@ -168,6 +169,179 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
/**
+ * dev_pm_domain_attach_list - Associate a device with its PM domains.
+ * @dev: The device to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * This function helps to attach a device to its multiple PM domains. The
+ * caller, which is typically a driver's probe function, may provide a list of
+ * names for the PM domains that we should try to attach the device to, but it
+ * may also provide an empty list, in which case the device is attached to all
+ * of the available PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure. Note that, to detach the list of PM domains, the driver shall call
+ * dev_pm_domain_detach_list(), typically during the remove phase.
+ */
+int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ struct device_node *np = dev->of_node;
+ struct dev_pm_domain_list *pds;
+ struct device *pd_dev = NULL;
+ int ret, i, num_pds = 0;
+ bool by_id = true;
+ size_t size;
+ u32 pd_flags = data ? data->pd_flags : 0;
+ u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ /* For now this is limited to OF based platforms. */
+ if (!np)
+ return 0;
+
+ if (data && data->pd_names) {
+ num_pds = data->num_pd_names;
+ by_id = false;
+ } else {
+ num_pds = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+ }
+
+ if (num_pds <= 0)
+ return 0;
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links) +
+ sizeof(*pds->opp_tokens);
+ pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
+ if (!pds->pd_devs) {
+ ret = -ENOMEM;
+ goto free_pds;
+ }
+ pds->pd_links = (void *)(pds->pd_devs + num_pds);
+ pds->opp_tokens = (void *)(pds->pd_links + num_pds);
+
+ if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
+ link_flags |= DL_FLAG_RPM_ACTIVE;
+
+ for (i = 0; i < num_pds; i++) {
+ if (by_id)
+ pd_dev = dev_pm_domain_attach_by_id(dev, i);
+ else
+ pd_dev = dev_pm_domain_attach_by_name(dev,
+ data->pd_names[i]);
+ if (IS_ERR_OR_NULL(pd_dev)) {
+ ret = pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
+ goto err_attach;
+ }
+
+ if (pd_flags & PD_FLAG_REQUIRED_OPP) {
+ struct dev_pm_opp_config config = {
+ .required_dev = pd_dev,
+ .required_dev_index = i,
+ };
+
+ ret = dev_pm_opp_set_config(dev, &config);
+ if (ret < 0)
+ goto err_link;
+
+ pds->opp_tokens[i] = ret;
+ }
+
+ if (link_flags) {
+ struct device_link *link;
+
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ ret = -ENODEV;
+ goto err_link;
+ }
+
+ pds->pd_links[i] = link;
+ }
+
+ pds->pd_devs[i] = pd_dev;
+ }
+
+ pds->num_pds = num_pds;
+ *list = pds;
+ return num_pds;
+
+err_link:
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
+ dev_pm_domain_detach(pd_dev, true);
+err_attach:
+ while (--i >= 0) {
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
+ if (pds->pd_links[i])
+ device_link_del(pds->pd_links[i]);
+ dev_pm_domain_detach(pds->pd_devs[i], true);
+ }
+ kfree(pds->pd_devs);
+free_pds:
+ kfree(pds);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
+
+/**
+ * devm_pm_domain_detach_list - devres action that detaches a list of PM domains.
+ * @_list: The list of PM domains to detach.
+ *
+ * This function reverses the actions of devm_pm_domain_attach_list(). It
+ * is invoked implicitly during the remove phase if the driver used
+ * devm_pm_domain_attach_list() to attach the PM domains.
+ */
+static void devm_pm_domain_detach_list(void *_list)
+{
+ struct dev_pm_domain_list *list = _list;
+
+ dev_pm_domain_detach_list(list);
+}
+
+/**
+ * devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list
+ * @dev: The device to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * NOTE: this will also handle calling devm_pm_domain_detach_list() for
+ * you during the remove phase.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure.
+ */
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ int ret, num_pds;
+
+ num_pds = dev_pm_domain_attach_list(dev, data, list);
+ if (num_pds <= 0)
+ return num_pds;
+
+ ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list);
+ if (ret)
+ return ret;
+
+ return num_pds;
+}
+EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
@@ -188,6 +362,35 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_detach_list - Detach a list of PM domains.
+ * @list: The list of PM domains to detach.
+ *
+ * This function reverses the actions of dev_pm_domain_attach_list().
+ * Typically it should be invoked by drivers during the remove phase.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
+{
+ int i;
+
+ if (!list)
+ return;
+
+ for (i = 0; i < list->num_pds; i++) {
+ dev_pm_opp_clear_config(list->opp_tokens[i]);
+ if (list->pd_links[i])
+ device_link_del(list->pd_links[i]);
+ dev_pm_domain_detach(list->pd_devs[i], true);
+ }
+
+ kfree(list->pd_devs);
+ kfree(list);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
+
+/**
* dev_pm_domain_start - Start the device through its PM domain.
* @dev: Device to start.
*
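
Typical consumption of the new multi-domain attach API from a driver's
probe path (the "perf"/"mem" domain names are made up; the struct
fields are the ones this patch defines):

	static int my_probe(struct device *dev)
	{
		static const char * const pd_names[] = { "perf", "mem" };
		const struct dev_pm_domain_attach_data pd_data = {
			.pd_names = pd_names,
			.num_pd_names = ARRAY_SIZE(pd_names),
			.pd_flags = PD_FLAG_DEV_LINK_ON,
		};
		struct dev_pm_domain_list *pd_list;
		int ret;

		/* Attach both domains; devres detaches them on remove. */
		ret = devm_pm_domain_attach_list(dev, &pd_data, &pd_list);
		if (ret < 0)
			return ret;

		/* ret == 0: no power-domains in DT; ret == 2: both attached. */
		return 0;
	}
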
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fadcd0379dc2..40e1d8d8a589 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -60,7 +60,6 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
-struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
@@ -209,7 +208,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -224,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
return;
rettime = ktime_get();
- dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
(unsigned long long)ktime_us_delta(rettime, calltime));
}
@@ -497,6 +496,7 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@@ -513,11 +513,23 @@ struct dpm_watchdog {
static void dpm_watchdog_handler(struct timer_list *t)
{
struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
+
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL, KERN_EMERG);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -531,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
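
A worked example of the two-stage watchdog above (the Kconfig values
are assumptions, not taken from this patch):

	/*
	 * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT = 120,
	 * CONFIG_DPM_WATCHDOG_TIMEOUT = 480:
	 *   t = 120s: warning + stack dump, re-armed for 480 - 120 = 360s
	 *   t = 480s: wd->fatal was set, so the second expiry panics
	 * If the two values are equal, wd->fatal starts out true and the
	 * first expiry panics immediately (the old single-stage behaviour).
	 */
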
@@ -578,6 +591,35 @@ bool dev_pm_skip_resume(struct device *dev)
return !dev->power.must_resume;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ dev->power.async_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+ }
+ /*
+ * Because async_schedule_dev_nocall() above has returned false or it
+ * has not been called at all, func() is not running and it is safe to
+ * update the async_in_progress flag without extra synchronization.
+ */
+ dev->power.async_in_progress = false;
+ return false;
+}
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -614,13 +656,15 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
- * to avoid confusing drivers that don't use it.
+ * status to "active" unless its power.set_active flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume)
+ if (skip_resume) {
pm_runtime_set_suspended(dev);
- else if (dev_pm_skip_suspend(dev))
+ } else if (dev->power.set_active) {
pm_runtime_set_active(dev);
+ dev->power.set_active = false;
+ }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -657,42 +701,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
-static bool dpm_async_fn(struct device *dev, async_func_t func)
-{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
-
- get_device(dev);
-
- if (async_schedule_dev_nocall(func, dev))
- return true;
-
- put_device(dev);
- }
- /*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
- */
- dev->power.async_in_progress = false;
- return false;
-}
-
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
@@ -707,9 +721,12 @@ static void dpm_noirq_resume_devices(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -736,6 +753,9 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@@ -817,8 +837,7 @@ Out:
complete_all(&dev->power.completion);
if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
@@ -842,9 +861,12 @@ void dpm_resume_early(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -871,6 +893,9 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -905,7 +930,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
@@ -974,8 +999,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
@@ -1004,10 +1028,11 @@ void dpm_resume(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -1017,29 +1042,25 @@ void dpm_resume(pm_message_t state)
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
-
- get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
if (!dev->power.async_in_progress) {
+ get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
device_resume(dev, state, false);
+ put_device(dev);
+
mutex_lock(&dpm_list_mtx);
}
-
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
-
- mutex_unlock(&dpm_list_mtx);
-
- put_device(dev);
-
- mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
@@ -1187,7 +1208,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
}
/**
- * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1195,7 +1216,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1240,6 +1261,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
@@ -1257,8 +1280,14 @@ Skip:
dev->power.may_skip_resume))
dev->power.must_resume = true;
- if (dev->power.must_resume)
+ if (dev->power.must_resume) {
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
+ dev->power.set_active = true;
+ if (dev->parent && !dev->parent->power.ignore_children)
+ dev->parent->power.set_active = true;
+ }
dpm_superior_set_must_resume(dev);
+ }
Complete:
complete_all(&dev->power.completion);
@@ -1269,54 +1298,37 @@ Complete:
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend_noirq(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_noirq(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_noirq(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_noirq))
- return 0;
-
- return __device_suspend_noirq(dev, pm_transition, false);
-}
-
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_suspend_noirq(dev);
+ list_move(&dev->power.entry, &dpm_noirq_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " noirq", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_noirq_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_noirq(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1324,15 +1336,16 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend_noirq++;
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- }
+
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
@@ -1375,14 +1388,14 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
}
/**
- * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1434,6 +1447,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async late" : " late", error);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1450,24 +1465,11 @@ Complete:
static void async_suspend_late(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
- error = __device_suspend_late(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_late(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_late(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_late))
- return 0;
-
- return __device_suspend_late(dev, pm_transition, false);
-}
-
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -1478,32 +1480,28 @@ int dpm_suspend_late(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
- wake_up_all_idle_cpus();
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
-
- get_device(dev);
+ wake_up_all_idle_cpus();
- mutex_unlock(&dpm_list_mtx);
+ mutex_lock(&dpm_list_mtx);
- error = device_suspend_late(dev);
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
- mutex_lock(&dpm_list_mtx);
+ list_move(&dev->power.entry, &dpm_late_early_list);
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
+ if (dpm_async_fn(dev, async_suspend_late))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " late", error);
- dpm_save_failed_dev(dev_name(dev));
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_late(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1511,12 +1509,14 @@ int dpm_suspend_late(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
+
if (error) {
- suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
@@ -1597,12 +1597,12 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
}
/**
- * __device_suspend - Execute "suspend" callbacks for given device.
+ * device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+static int device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1716,8 +1716,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd);
Complete:
- if (error)
+ if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
@@ -1727,25 +1730,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
static void async_suspend(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend))
- return 0;
-
- return __device_suspend(dev, pm_transition, false);
-}
-
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1761,29 +1750,25 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
- get_device(dev);
+ mutex_lock(&dpm_list_mtx);
- mutex_unlock(&dpm_list_mtx);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
- error = device_suspend(dev);
+ list_move(&dev->power.entry, &dpm_suspended_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend))
+ continue;
- if (error) {
- pm_dev_err(dev, state, "", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_suspended_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1791,14 +1776,16 @@ int dpm_suspend(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend++;
+
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
- }
+
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
@@ -1949,11 +1936,11 @@ int dpm_suspend_start(pm_message_t state)
int error;
error = dpm_prepare(state);
- if (error) {
- suspend_stats.failed_prepare++;
+ if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- } else
+ else
error = dpm_suspend(state);
+
dpm_show_time(starttime, state, error, "start");
return error;
}
@@ -1962,7 +1949,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
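The main.c changes above share one shape: the __device_* helpers absorb their thin wrappers and take over their names, failure logging and statistics move into the shared callback path so synchronous and asynchronous failures are reported identically, and each dpm_* loop moves the device onto its destination list before dispatching the callback. Condensed sketch of the resulting loop, pieced together from the hunks above (not a verbatim copy of any one of them):

	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		/* Move first, so list state is settled before any callback runs */
		list_move(&dev->power.entry, &dpm_suspended_list);

		/* Queued async? That path holds its own reference and now
		 * reports its own errors via device_suspend(). */
		if (dpm_async_fn(dev, async_suspend))
			continue;

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state, false);

		put_device(dev);
		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}

The %pS -> %ps change in __suspend_report_result() is independent of the rest: for a plain function pointer, %ps prints the bare symbol name without the offset suffix that %pS appends.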
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index bd77f6734f14..ff393cba7649 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -137,6 +137,7 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
/**
* apply_constraint - Add/modify/remove device PM QoS request.
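Exporting dev_pm_qos_read_value() lets modular drivers query the aggregate PM QoS constraint on their own device. A hedged sketch (foo_may_enter_deep_idle() and the 200us budget are invented for illustration; the API calls are real):

#include <linux/pm_qos.h>

static bool foo_may_enter_deep_idle(struct device *dev)
{
	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);

	/* no constraint at all, or a latency budget covering our ~200us resume */
	return limit == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT || limit >= 200;
}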
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 05793c9fbb84..2ee45841486b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -94,6 +94,7 @@ static void update_pm_runtime_accounting(struct device *dev)
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
+ trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}
@@ -1176,7 +1177,7 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_active - Conditionally bump up device usage counter.
+ * pm_runtime_get_conditional - Conditionally bump up device usage counter.
* @dev: Device to handle.
* @ign_usage_count: Whether or not to look at the current usage counter value.
*
@@ -1197,7 +1198,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
-int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
@@ -1218,9 +1219,40 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
return retval;
}
+
+/**
+ * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
+ * in active state
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
+ * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
+ * device, in which case the usage counter is also left unmodified.
+ */
+int pm_runtime_get_if_active(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, true);
+}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
+ * it returns 1. If the device is in a different state or its usage_count is 0,
+ * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
+ * in which case the usage counter is also left unmodified.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+
+/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
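The conditional-get interface is split in two: pm_runtime_get_if_active() loses its bool argument and pm_runtime_get_if_in_use() covers the usage-count-checking variant, both backed by the now-static pm_runtime_get_conditional(). A sketch of the classic consumer, a shared interrupt handler (foo_irq_handler() is hypothetical):

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	/* > 0 only if the device is RPM_ACTIVE with a non-zero usage count */
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return IRQ_NONE;

	/* ... safe to touch the hardware here ... */

	pm_runtime_put_autosuspend(dev);
	return IRQ_HANDLED;
}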
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a1474fb67db9..f84018125b46 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -509,14 +508,6 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
return sysfs_emit(buf, "%lld\n", msec);
}
-static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
- kgid_t kgid)
-{
- if (dev->power.wakeup && dev->power.wakeup->dev)
- return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
- return 0;
-}
-
static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
@@ -541,6 +532,15 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 42171f766dcb..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, like dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
+
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
@@ -313,8 +339,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
- wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
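Probe-time usage sketch for the new device-managed helper (foo_probe() is hypothetical; the calls are the ones added above):

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);

	ret = devm_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		return ret;

	/* no dev_pm_clear_wake_irq() needed in error paths or remove() */
	return 0;
}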
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a917219feea6..752b417e8129 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -451,16 +451,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev)
* Detach the @dev's wakeup source object from it, unregister this wakeup source
* object and destroy it.
*/
-int device_wakeup_disable(struct device *dev)
+void device_wakeup_disable(struct device *dev)
{
struct wakeup_source *ws;
if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
+ return;
ws = device_wakeup_detach(dev);
wakeup_source_unregister(ws);
- return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
@@ -502,7 +501,11 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+ if (enable)
+ return device_wakeup_enable(dev);
+
+ device_wakeup_disable(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
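Caller-side effect of the void conversion, as a before/after fragment (illustrative, not from the patch):

	/* before: callers had to pretend this could fail */
	ret = device_wakeup_disable(dev);
	if (ret)
		dev_warn(dev, "failed to disable wakeup\n");

	/* after: unconditional, nothing to check */
	device_wakeup_disable(dev);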
diff --git a/drivers/base/property.c b/drivers/base/property.c
index a1b01ab42052..c1392743df9c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -7,15 +7,16 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/kconfig.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_graph.h>
-#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
struct fwnode_handle *__dev_fwnode(struct device *dev)
{
@@ -70,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
+ * device_property_read_bool - Return the value for a boolean property of a device
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Return whether property @propname is true or false in the device firmware description.
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
+ */
+bool device_property_read_bool(const struct device *dev, const char *propname)
+{
+ return fwnode_property_read_bool(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_read_bool);
+
+/**
+ * fwnode_property_read_bool - Return the value for a boolean property of a firmware node
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Return whether property @propname is true or false in the firmware description.
+ */
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
+
+/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
@@ -700,34 +739,6 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
- * @fwnode: firmware node
- *
- * Given a firmware node (@fwnode), this function finds its closest ancestor
- * firmware node that has a corresponding struct device and returns that struct
- * device.
- *
- * The caller is responsible for calling put_device() on the returned device
- * pointer.
- *
- * Return: a pointer to the device of the @fwnode's closest ancestor.
- */
-struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *parent;
- struct device *dev;
-
- fwnode_for_each_parent_node(fwnode, parent) {
- dev = get_dev_from_fwnode(parent);
- if (dev) {
- fwnode_handle_put(parent);
- return dev;
- }
- }
- return NULL;
-}
-
-/**
* fwnode_count_parents - Return the number of parents a node has
* @fwnode: The node the parents of which are to be counted
*
@@ -774,34 +785,6 @@ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
- * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
- * @ancestor: Firmware which is tested for being an ancestor
- * @child: Firmware which is tested for being the child
- *
- * A node is considered an ancestor of itself too.
- *
- * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
- */
-bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child)
-{
- struct fwnode_handle *parent;
-
- if (IS_ERR_OR_NULL(ancestor))
- return false;
-
- if (child == ancestor)
- return true;
-
- fwnode_for_each_parent_node(child, parent) {
- if (parent == ancestor) {
- fwnode_handle_put(parent);
- return true;
- }
- }
- return false;
-}
-
-/**
* fwnode_get_next_child_node - Return the next child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
@@ -924,20 +907,6 @@ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_handle_get);
/**
- * fwnode_handle_put - Drop reference to a device node
- * @fwnode: Pointer to the device node to drop the reference to.
- *
- * This has to be used when terminating device_for_each_child_node() iteration
- * with break or return to prevent stale device node references from being left
- * behind.
- */
-void fwnode_handle_put(struct fwnode_handle *fwnode)
-{
- fwnode_call_void_op(fwnode, put);
-}
-EXPORT_SYMBOL_GPL(fwnode_handle_put);
-
-/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*
@@ -960,7 +929,7 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
* device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to cound the child nodes for
+ * @dev: Device to count the child nodes for
*
* Return: the number of child nodes for a given device.
*/
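device_property_read_bool() and fwnode_property_read_bool() become out-of-line exported functions, and the fwnode variant now also consults the secondary fwnode. Driver usage is unchanged; a minimal sketch (foo_setup() is hypothetical, "wakeup-source" is the standard DT property name):

#include <linux/pm_wakeup.h>
#include <linux/property.h>

static int foo_setup(struct device *dev)
{
	/* true if the firmware node (or its secondary) carries the property */
	if (device_property_read_bool(dev, "wakeup-source"))
		device_init_wakeup(dev, true);

	return 0;
}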
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 583dd5d7d46b..bdb450436cbc 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
unsigned long raw_spinlock_flags;
};
};
+ struct lock_class_key *lock_key;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
@@ -93,6 +94,7 @@ struct regmap {
#endif
unsigned int max_register;
+ bool max_register_is_set;
bool (*writeable_reg)(struct device *dev, unsigned int reg);
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
@@ -325,20 +327,22 @@ struct regmap_ram_data {
* Create a test register map with data stored in RAM, not intended
* for practical use.
*/
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
+#define regmap_init_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_ram, #dev, dev, config, data)
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_raw_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data)
+#define regmap_init_raw_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_raw_ram, #dev, dev, config, data)
#endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index b7e4b2464102..f36d3618b67c 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -23,11 +23,11 @@ static int regcache_flat_init(struct regmap *map)
int i;
unsigned int *cache;
- if (!map || map->reg_stride_order < 0 || !map->max_register)
+ if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
- + 1, sizeof(unsigned int), GFP_KERNEL);
+ + 1, sizeof(unsigned int), map->alloc_flags);
if (!map->cache)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 41edd6a430eb..2319c30283a6 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -73,8 +73,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
rcu_read_unlock();
- entry = kmalloc((last - index + 1) * sizeof(unsigned long),
- map->alloc_flags);
+ entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -110,9 +109,10 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, min, max);
unsigned long *entry, *lower, *upper;
- unsigned long lower_index, lower_last;
+ /* initialized to work around false-positive -Wuninitialized warning */
+ unsigned long lower_index = 0, lower_last = 0;
unsigned long upper_index, upper_last;
- int ret;
+ int ret = 0;
lower = NULL;
upper = NULL;
@@ -132,9 +132,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
lower_index = mas.index;
lower_last = min - 1;
- lower = kmemdup(entry, ((min - mas.index) *
- sizeof(unsigned long)),
- map->alloc_flags);
+ lower = kmemdup_array(entry,
+ min - mas.index, sizeof(*lower),
+ map->alloc_flags);
if (!lower) {
ret = -ENOMEM;
goto out_unlocked;
@@ -145,10 +145,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;
- upper = kmemdup(&entry[max + 1],
- ((mas.last - max) *
- sizeof(unsigned long)),
- map->alloc_flags);
+ upper = kmemdup_array(&entry[max - mas.index + 1],
+ mas.last - max, sizeof(*upper),
+ map->alloc_flags);
if (!upper) {
ret = -ENOMEM;
goto out_unlocked;
@@ -204,7 +203,7 @@ static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
- buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
+ buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
@@ -244,7 +243,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
unsigned long lmin = min;
unsigned long lmax = max;
unsigned int r, v, sync_start;
- int ret;
+ int ret = 0;
bool sync_needed = false;
map->cache_bypass = true;
@@ -294,7 +293,7 @@ static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, 0, UINT_MAX);
- unsigned int *entry;;
+ unsigned int *entry;
/* if we've already been called then just return */
if (!mt)
@@ -320,7 +319,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
unsigned long *entry;
int i, ret;
- entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
+ entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -348,13 +347,16 @@ static int regcache_maple_init(struct regmap *map)
int ret;
int range_start;
- mt = kmalloc(sizeof(*mt), GFP_KERNEL);
+ mt = kmalloc(sizeof(*mt), map->alloc_flags);
if (!mt)
return -ENOMEM;
map->cache = mt;
mt_init(mt);
+ if (!mt_external_lock(mt) && map->lock_key)
+ lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
if (!map->num_reg_defaults)
return 0;
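The kmalloc()/kmemdup() to kmalloc_array()/kmemdup_array() conversions above are overflow hardening: the *_array helpers fail cleanly if the count * element_size multiplication would wrap, where the open-coded form silently under-allocates. Illustrative comparison (not from the patch):

#include <linux/slab.h>

	/* checked: returns NULL if n * sizeof(*vals) overflows */
	unsigned long *vals = kmalloc_array(n, sizeof(*vals), GFP_KERNEL);

	/* unchecked: a huge n wraps the multiplication and under-allocates */
	unsigned long *bad = kmalloc(n * sizeof(*bad), GFP_KERNEL);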
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 3db88bbcae0f..a9d17f316e55 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -187,7 +187,7 @@ static int regcache_rbtree_init(struct regmap *map)
int i;
int ret;
- map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+ map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags);
if (!map->cache)
return -ENOMEM;
@@ -275,18 +275,16 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
pos = (reg - base_reg) / map->reg_stride;
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
- blk = krealloc(rbnode->block,
- blklen * map->cache_word_size,
- map->alloc_flags);
+ blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present),
- map->alloc_flags);
+ present = krealloc_array(rbnode->cache_present,
+ BITS_TO_LONGS(blklen), sizeof(*present),
+ map->alloc_flags);
if (!present)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index ac63a73ccdaa..b1f8508c3966 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -154,7 +154,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->num_reg_defaults = config->num_reg_defaults;
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_word_size = BITS_TO_BYTES(config->val_bits);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
map->cache = NULL;
@@ -170,8 +170,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
* a copy of it.
*/
if (config->reg_defaults) {
- tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
- sizeof(struct reg_default), GFP_KERNEL);
+ tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
+ sizeof(*map->reg_defaults), GFP_KERNEL);
if (!tmp_buf)
return -ENOMEM;
map->reg_defaults = tmp_buf;
@@ -187,13 +187,17 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
return 0;
}
- if (!map->max_register && map->num_reg_defaults_raw)
+ if (!map->max_register_is_set && map->num_reg_defaults_raw) {
map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
+ map->max_register_is_set = true;
+ }
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
map->cache_ops->name);
+ map->lock(map->lock_arg);
ret = map->cache_ops->init(map);
+ map->unlock(map->lock_arg);
if (ret)
goto err_free;
}
@@ -221,7 +225,9 @@ void regcache_exit(struct regmap *map)
if (map->cache_ops->exit) {
dev_dbg(map->dev, "Destroying %s cache\n",
map->cache_ops->name);
+ map->lock(map->lock_arg);
map->cache_ops->exit(map);
+ map->unlock(map->lock_arg);
}
}
@@ -405,7 +411,7 @@ out:
* have gone out of sync, force writes of all the paging
* registers.
*/
- rb_for_each(node, 0, &map->range_tree, rbtree_all) {
+ rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
struct regmap_range_node *this =
rb_entry(node, struct regmap_range_node, node);
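BITS_TO_BYTES(n) expands to DIV_ROUND_UP(n, BITS_PER_BYTE), so the cache_word_size change is purely cosmetic; it rounds partial bytes up exactly as before:

#include <linux/bitops.h>

static_assert(BITS_TO_BYTES(8) == 1);
static_assert(BITS_TO_BYTES(12) == 2);	/* partial byte rounds up */
static_assert(BITS_TO_BYTES(32) == 4);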
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index b9f76bdf74a9..a561971c459c 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -86,4 +86,5 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
+MODULE_DESCRIPTION("Register map access API - AC'97 support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 3ec611dc0c09..c9b39a02278e 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
if (quirks->max_write_len &&
(bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
- max_write = quirks->max_write_len;
+ max_write = quirks->max_write_len -
+ (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_read || max_write) {
ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
@@ -396,4 +397,5 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
+MODULE_DESCRIPTION("Register map access API - I2C support");
MODULE_LICENSE("GPL");
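The max_write quirk fix accounts for the register address occupying part of every I2C transfer. Worked numbers (adapter values hypothetical): with reg_bits = 8, pad_bits = 0 and quirks->max_write_len = 32, the address consumes (8 + 0) / 8 = 1 byte, so regmap may move at most 32 - 1 = 31 bytes of register payload per write; before the fix it would advertise all 32 bytes and overrun the adapter's limit.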
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index 0328b0b34284..b5300b7c477e 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -56,5 +56,5 @@ struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c);
MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
-MODULE_DESCRIPTION("Regmap I3C Module");
+MODULE_DESCRIPTION("regmap I3C Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 45fd13ef13fc..978613407ea3 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -305,8 +305,8 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
unsigned int b)
{
const struct regmap_irq_chip *chip = data->chip;
+ const struct regmap_irq_sub_irq_map *subreg;
struct regmap *map = data->map;
- struct regmap_irq_sub_irq_map *subreg;
unsigned int reg;
int i, ret = 0;
@@ -364,14 +364,11 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
} else if (chip->num_main_regs) {
unsigned int max_main_bits;
- unsigned long size;
-
- size = chip->num_regs * sizeof(unsigned int);
max_main_bits = (chip->num_main_status_bits) ?
chip->num_main_status_bits : chip->num_regs;
/* Clear the status buf as we don't read all status regs */
- memset(data->status_buf, 0, size);
+ memset32(data->status_buf, 0, chip->num_regs);
/* We could support bulk read for main status registers
* but I don't expect to see devices with really many main
@@ -514,12 +511,16 @@ exit:
return IRQ_NONE;
}
+static struct lock_class_key regmap_irq_lock_class;
+static struct lock_class_key regmap_irq_request_class;
+
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct regmap_irq_chip_data *data = h->host_data;
irq_set_chip_data(virq, data);
+ irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
irq_set_parent(virq, data->irq);
@@ -608,6 +609,30 @@ int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
+static int regmap_irq_create_domain(struct fwnode_handle *fwnode, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data *d)
+{
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = chip->num_irqs,
+ .hwirq_max = chip->num_irqs,
+ .virq_base = irq_base,
+ .ops = &regmap_domain_ops,
+ .host_data = d,
+ .name_suffix = chip->domain_suffix,
+ };
+
+ d->domain = irq_domain_instantiate(&info);
+ if (IS_ERR(d->domain)) {
+ dev_err(d->map->dev, "Failed to create IRQ domain\n");
+ return PTR_ERR(d->domain);
+ }
+
+ return 0;
+}
+
/**
* regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
@@ -856,18 +881,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
}
- if (irq_base)
- d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
- irq_base, 0,
- &regmap_domain_ops, d);
- else
- d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
- &regmap_domain_ops, d);
- if (!d->domain) {
- dev_err(map->dev, "Failed to create IRQ domain\n");
- ret = -ENOMEM;
+ ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
+ if (ret)
goto err_alloc;
- }
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
@@ -890,6 +906,7 @@ err_alloc:
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
@@ -965,6 +982,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
if (d->config_buf) {
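regmap_irq_create_domain() folds the legacy/linear branch into a single irq_domain_instantiate() call, with struct irq_domain_info carrying virq_base so a non-zero irq_base still selects the legacy-style mapping; the added kfree(d->main_status_buf) calls separately plug a leak in the error and teardown paths. Driver-facing usage is untouched, e.g. this hypothetical chip registration (FOO_* names invented):

#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq foo_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),	/* one IRQ in status/mask register 0 */
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= FOO_REG_IRQ_STATUS,
	.mask_base	= FOO_REG_IRQ_MASK,
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};

static int foo_add_irq_chip(struct device *dev, struct regmap *map, int irq)
{
	struct regmap_irq_chip_data *irq_data;

	return devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0,
					&foo_irq_chip, &irq_data);
}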
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 0d957c5f1bcc..64ea340950b6 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -4,11 +4,27 @@
//
// Copyright 2023 Arm Ltd
+#include <kunit/device.h>
+#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"
#define BLOCK_TEST_SIZE 12
+KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
+
+struct regmap_test_priv {
+ struct device *dev;
+};
+
+struct regmap_test_param {
+ enum regcache_type cache;
+ enum regmap_endian val_endian;
+
+ unsigned int from_reg;
+ bool fast_io;
+};
+
static void get_changed_bytes(void *orig, void *new, size_t size)
{
char *o = orig;
@@ -27,57 +43,140 @@ static void get_changed_bytes(void *orig, void *new, size_t size)
}
static const struct regmap_config test_regmap_config = {
- .max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
.val_bits = sizeof(unsigned int) * 8,
};
-struct regcache_types {
- enum regcache_type type;
- const char *name;
-};
+static const char *regcache_type_name(enum regcache_type type)
+{
+ switch (type) {
+ case REGCACHE_NONE:
+ return "none";
+ case REGCACHE_FLAT:
+ return "flat";
+ case REGCACHE_RBTREE:
+ return "rbtree";
+ case REGCACHE_MAPLE:
+ return "maple";
+ default:
+ return NULL;
+ }
+}
+
+static const char *regmap_endian_name(enum regmap_endian endian)
+{
+ switch (endian) {
+ case REGMAP_ENDIAN_BIG:
+ return "big";
+ case REGMAP_ENDIAN_LITTLE:
+ return "little";
+ case REGMAP_ENDIAN_DEFAULT:
+ return "default";
+ case REGMAP_ENDIAN_NATIVE:
+ return "native";
+ default:
+ return NULL;
+ }
+}
-static void case_to_desc(const struct regcache_types *t, char *desc)
+static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
- strcpy(desc, t->name);
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x",
+ regcache_type_name(param->cache),
+ regmap_endian_name(param->val_endian),
+ param->fast_io ? " fast I/O" : "",
+ param->from_reg);
}
-static const struct regcache_types regcache_types_list[] = {
- { REGCACHE_NONE, "none" },
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param regcache_types_list[] = {
+ { .cache = REGCACHE_NONE },
+ { .cache = REGCACHE_NONE, .fast_io = true },
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_RBTREE, .fast_io = true },
+ { .cache = REGCACHE_MAPLE },
+ { .cache = REGCACHE_MAPLE, .fast_io = true },
};
-KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
-static const struct regcache_types real_cache_types_list[] = {
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param real_cache_types_only_list[] = {
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_RBTREE, .fast_io = true },
+ { .cache = REGCACHE_MAPLE },
+ { .cache = REGCACHE_MAPLE, .fast_io = true },
};
-KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
+
+static const struct regmap_test_param real_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
+};
-static const struct regcache_types sparse_cache_types_list[] = {
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
+
+static const struct regmap_test_param sparse_cache_types_list[] = {
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};
-KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
-static struct regmap *gen_regmap(struct regmap_config *config,
+static struct regmap *gen_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap_test_priv *priv = test->priv;
unsigned int *buf;
- struct regmap *ret;
- size_t size = (config->max_register + 1) * sizeof(unsigned int);
- int i;
+ struct regmap *ret = ERR_PTR(-ENOMEM);
+ size_t size;
+ int i, error;
struct reg_default *defaults;
- config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
- config->cache_type == REGCACHE_MAPLE;
+ config->cache_type = param->cache;
+ config->fast_io = param->fast_io;
+ if (config->max_register == 0) {
+ config->max_register = param->from_reg;
+ if (config->num_reg_defaults)
+ config->max_register += (config->num_reg_defaults - 1) *
+ config->reg_stride;
+ else
+ config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
+ }
+
+ size = array_size(config->max_register + 1, sizeof(*buf));
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -86,49 +185,61 @@ static struct regmap *gen_regmap(struct regmap_config *config,
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data))
- return ERR_PTR(-ENOMEM);
+ goto out_free;
(*data)->vals = buf;
if (config->num_reg_defaults) {
- defaults = kcalloc(config->num_reg_defaults,
- sizeof(struct reg_default),
- GFP_KERNEL);
+ defaults = kunit_kcalloc(test,
+ config->num_reg_defaults,
+ sizeof(struct reg_default),
+ GFP_KERNEL);
if (!defaults)
- return ERR_PTR(-ENOMEM);
+ goto out_free;
+
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
- defaults[i].reg = i * config->reg_stride;
- defaults[i].def = buf[i * config->reg_stride];
+ defaults[i].reg = param->from_reg + (i * config->reg_stride);
+ defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
}
}
- ret = regmap_init_ram(config, *data);
- if (IS_ERR(ret)) {
- kfree(buf);
- kfree(*data);
- }
+ ret = regmap_init_ram(priv->dev, config, *data);
+ if (IS_ERR(ret))
+ goto out_free;
+
+ /* This calls regmap_exit() on failure, which frees buf and *data */
+ error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
+ if (error)
+ ret = ERR_PTR(error);
+
+ return ret;
+
+out_free:
+ kfree(buf);
+ kfree(*data);
return ret;
}
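gen_regmap() now ties each map's lifetime to the test through a deferred action, which is why the per-test regmap_exit() calls disappear in the hunks below. The generic shape of that KUnit pattern, sketched with invented foo_* names:

#include <kunit/resource.h>
#include <kunit/test.h>

KUNIT_DEFINE_ACTION_WRAPPER(foo_close_action, foo_close, struct foo *);

static struct foo *test_foo_open(struct kunit *test)
{
	struct foo *f = foo_open();

	/* runs foo_close(f) when the test ends, or immediately if
	 * registering the action itself fails */
	if (!f || kunit_add_action_or_reset(test, foo_close_action, f))
		return NULL;

	return f;
}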
-static bool reg_5_false(struct device *context, unsigned int reg)
+static bool reg_5_false(struct device *dev, unsigned int reg)
{
- return reg != 5;
+ struct kunit *test = dev_get_drvdata(dev);
+ const struct regmap_test_param *param = test->param_value;
+
+ return reg != (param->from_reg + 5);
}
static void basic_read_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -141,14 +252,11 @@ static void basic_read_write(struct kunit *test)
KUNIT_EXPECT_EQ(test, val, rval);
/* If using a cache the cache satisfied the read */
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}
static void bulk_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -156,9 +264,8 @@ static void bulk_write(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -178,14 +285,11 @@ static void bulk_write(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void bulk_read(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -193,9 +297,8 @@ static void bulk_read(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -211,14 +314,211 @@ static void bulk_read(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
+static void multi_write(struct kunit *test)
+{
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ struct reg_sequence sequence[BLOCK_TEST_SIZE];
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /*
+ * Data written via the multi API can be read back with single
+ * reads.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ sequence[i].reg = i;
+ sequence[i].def = val[i];
+ sequence[i].delay_us = 0;
+ }
+ KUNIT_EXPECT_EQ(test, 0,
+ regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
+
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
+static void multi_read(struct kunit *test)
+{
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int regs[BLOCK_TEST_SIZE];
+ unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Data written as single writes can be read via the multi API */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+ regs[i] = i;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
+ }
+ KUNIT_EXPECT_EQ(test, 0,
+ regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
+
+ /* If using a cache the cache satisfied the read */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
+static void read_bypassed(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
- regmap_exit(map);
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only mode regmap_read_bypassed() should return the
+ * register value and leave the map in cache-only mode.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_NE(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+}
+
+static void read_bypassed_volatile(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+ /* All registers except #5 volatile */
+ config.volatile_reg = reg_5_false;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only mode regmap_read_bypassed() should return the
+ * register value and leave the map in cache-only mode.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Register #5 is non-volatile so should read from cache */
+ KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
+ regmap_read(map, param->from_reg + i, &rval));
+
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ if (i == 5)
+ continue;
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
}
static void write_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -226,11 +526,10 @@ static void write_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -247,13 +546,10 @@ static void write_readonly(struct kunit *test)
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
}
static void read_writeonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -261,10 +557,9 @@ static void read_writeonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.readable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -277,7 +572,7 @@ static void read_writeonly(struct kunit *test)
* fail if we aren't using the flat cache.
*/
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- if (t->type != REGCACHE_FLAT) {
+ if (config.cache_type != REGCACHE_FLAT) {
KUNIT_EXPECT_EQ(test, i != 5,
regmap_read(map, i, &val) == 0);
} else {
@@ -287,13 +582,10 @@ static void read_writeonly(struct kunit *test)
/* Did we trigger a hardware access? */
KUNIT_EXPECT_FALSE(test, data->read[5]);
-
- regmap_exit(map);
}
static void reg_defaults(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -301,10 +593,9 @@ static void reg_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -316,12 +607,11 @@ static void reg_defaults(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void reg_defaults_read_dev(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -329,17 +619,16 @@ static void reg_defaults_read_dev(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* We should have read the cache defaults back from the map */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
data->read[i] = false;
}
@@ -350,12 +639,11 @@ static void reg_defaults_read_dev(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void register_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -365,10 +653,9 @@ static void register_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -401,13 +688,10 @@ static void register_patch(struct kunit *test)
break;
}
}
-
- regmap_exit(map);
}
static void stride(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -415,16 +699,22 @@ static void stride(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.reg_stride = 2;
config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
- map = gen_regmap(&config, &data);
+ /*
+ * Allow one extra register so that the read/written arrays
+ * are sized big enough to include an entry for the odd
+ * address past the final reg_default register.
+ */
+ config.max_register = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
- /* Only even registers can be accessed, try both read and write */
+ /* Only even addresses can be accessed, try both read and write */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
data->read[i] = false;
data->written[i] = false;
@@ -437,15 +727,13 @@ static void stride(struct kunit *test)
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, data->vals[i], rval);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
data->read[i]);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_TRUE(test, data->written[i]);
}
}
-
- regmap_exit(map);
}
static struct regmap_range_cfg test_range = {
@@ -481,7 +769,6 @@ static bool test_range_all_volatile(struct device *dev, unsigned int reg)
static void basic_ranges(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -489,13 +776,12 @@ static void basic_ranges(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_all_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -546,14 +832,11 @@ static void basic_ranges(struct kunit *test)
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
-
- regmap_exit(map);
}
/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -562,18 +845,16 @@ static void stress_insert(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.max_register = 300;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
- vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
- GFP_KERNEL);
+ buf_sz = array_size(sizeof(*vals), config.max_register);
+ vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
KUNIT_ASSERT_FALSE(test, vals == NULL);
- buf_sz = sizeof(unsigned long) * config.max_register;
get_random_bytes(vals, buf_sz);
@@ -599,24 +880,21 @@ static void stress_insert(struct kunit *test)
for (i = 0; i < config.max_register; i ++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, rval, vals[i]);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
-
- regmap_exit(map);
}
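The allocation change above drops the kcalloc-style call in favour of array_size() plus kunit_kmalloc(): the buffer is immediately filled by get_random_bytes(), so zeroing is wasted work, and array_size() keeps the size computation overflow-safe. A minimal sketch of the idiom (helper name invented):

#include <linux/overflow.h>
#include <linux/slab.h>

/* array_size() saturates to SIZE_MAX on multiply overflow, so an
 * oversized request fails cleanly instead of allocating a short buffer. */
static unsigned long *alloc_reg_vals(size_t nregs)
{
	size_t bytes = array_size(sizeof(unsigned long), nregs);

	return kmalloc(bytes, GFP_KERNEL);	/* NULL when bytes == SIZE_MAX */
}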
static void cache_bypass(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -624,28 +902,26 @@ static void cache_bypass(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Ensure the cache has a value in it */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
/* Bypass then write a different value */
regcache_cache_bypass(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
/* Read the bypassed value */
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val + 1, rval);
- KUNIT_EXPECT_EQ(test, data->vals[0], rval);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);
/* Disable bypass, the cache should still return the original value */
regcache_cache_bypass(map, false);
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
-static void cache_sync(struct kunit *test)
+static void cache_sync_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -653,9 +929,8 @@ static void cache_sync(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -663,10 +938,10 @@ static void cache_sync(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Put some data into the cache */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
/* Trash the data on the device itself then resync */
regcache_mark_dirty(map);
@@ -674,16 +949,63 @@ static void cache_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just write the correct data out? */
- KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+}
+
+static void cache_sync_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE];
+ unsigned int val_mask;
+ int i;
- regmap_exit(map);
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ val_mask = GENMASK(config.val_bits - 1, 0);
+ get_random_bytes(&val, sizeof(val));
+
+ /* Put some data into the cache */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ /* Set cache-only and change the values */
+ regcache_cache_only(map, true);
+ for (i = 0; i < ARRAY_SIZE(val); ++i)
+ val[i] = ~val[i] & val_mask;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+
+ KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+
+ /* Exit cache-only and sync the cache without marking hardware registers dirty */
+ regcache_cache_only(map, false);
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Did we just write the correct data out? */
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
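cache_sync_after_cache_only() exercises the usual driver suspend/resume idiom around regcache_cache_only() and regcache_sync(). A minimal sketch of that idiom, assuming a driver-private struct holding the map (all names invented):

#include <linux/device.h>
#include <linux/regmap.h>

struct example_priv {
	struct regmap *map;
};

static int example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	regcache_cache_only(priv->map, true);	/* writes land in the cache only */
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	regcache_cache_only(priv->map, false);
	return regcache_sync(priv->map);	/* replay values changed while cached */
}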
-static void cache_sync_defaults(struct kunit *test)
+static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -691,10 +1013,9 @@ static void cache_sync_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -702,24 +1023,85 @@ static void cache_sync_defaults(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Change the value of one register */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));
/* Resync */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just sync the one register we touched? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
+ KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);
- regmap_exit(map);
+ /* Rewrite registers back to their defaults */
+ for (i = 0; i < config.num_reg_defaults; ++i)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
+ config.reg_defaults[i].def));
+
+ /*
+ * Resync after regcache_mark_dirty() should not write out registers
+ * that are at their default values

+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ regcache_mark_dirty(map);
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+}
+
+static void cache_sync_default_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int orig_val;
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));
+
+ /* Enter cache-only and change the value of one register */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));
+
+ /* Exit cache-only and resync, should write out the changed register */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Was the register written out? */
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);
+
+ /* Enter cache-only and write register back to its default value */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));
+
+ /* Resync should write out the new value */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}
static void cache_sync_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -727,40 +1109,37 @@ static void cache_sync_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Read all registers to fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Change the value of all registers, readonly should fail */
get_random_bytes(&val, sizeof(val));
regcache_cache_only(map, true);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
+ KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
regcache_cache_only(map, false);
/* Resync */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
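reg_5_false() is defined earlier in the file; judging from its use here as a writeable_reg callback that rejects only register 5, its body is presumably along these lines:

static bool reg_5_false(struct device *dev, unsigned int reg)
{
	return reg != 5;	/* everything writeable except register 5 */
}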
static void cache_sync_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -770,23 +1149,22 @@ static void cache_sync_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Stash the original values */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
/* Patch a couple of values */
- patch[0].reg = 2;
+ patch[0].reg = param->from_reg + 2;
patch[0].def = rval[2] + 1;
patch[0].delay_us = 0;
- patch[1].reg = 5;
+ patch[1].reg = param->from_reg + 5;
patch[1].def = rval[5] + 1;
patch[1].delay_us = 0;
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
@@ -795,33 +1173,31 @@ static void cache_sync_patch(struct kunit *test)
/* Sync the cache */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The patch should be on the device but not in the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
KUNIT_EXPECT_EQ(test, val, rval[i]);
switch (i) {
case 2:
case 5:
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
break;
default:
- KUNIT_EXPECT_EQ(test, false, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+ KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
break;
}
}
-
- regmap_exit(map);
}
static void cache_drop(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -829,41 +1205,267 @@ static void cache_drop(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Ensure the data is read from the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_FALSE(test, data->read[i]);
- data->read[i] = false;
+ KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
+ data->read[param->from_reg + i] = false;
}
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Drop some registers */
- KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
+ param->from_reg + 5));
/* Reread and check only the dropped registers hit the device. */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+}
- regmap_exit(map);
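For orientation, regcache_drop_region() takes an inclusive register range and discards any cache entries in it, so subsequent reads of those registers must hit the device again; schematically:

	/* drop cached values for base + 3 .. base + 5 (inclusive) */
	regcache_drop_region(map, base + 3, base + 5);
	regmap_read(map, base + 4, &val);	/* goes to the hardware, re-caches */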
+static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[4][BLOCK_TEST_SIZE];
+ unsigned int reg;
+ const int num_ranges = ARRAY_SIZE(val) * 2;
+ int rangeidx, i;
+
+ static_assert(ARRAY_SIZE(val) == 4);
+
+ config = test_regmap_config;
+ config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < config.max_register + 1; i++)
+ data->written[i] = false;
+
+ /* Create non-contiguous cache blocks by writing every other range */
+ get_random_bytes(&val, sizeof(val));
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
+ &val[rangeidx / 2],
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Drop range 2 */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));
+
+ /* Drop part of range 4 */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));
+
+ /* Mark dirty and reset mock registers to 0 */
+ regcache_mark_dirty(map);
+ for (i = 0; i < config.max_register + 1; i++) {
+ data->vals[i] = 0;
+ data->written[i] = false;
+ }
+
+ /* The registers that were dropped from range 4 should now remain at 0 */
+ val[4 / 2][3] = 0;
+ val[4 / 2][4] = 0;
+ val[4 / 2][5] = 0;
+
+ /* Sync and check that the expected register ranges were written */
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Check that even ranges (except 2 and 4) were written */
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ if ((rangeidx == 2) || (rangeidx == 4))
+ continue;
+
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[reg + i]);
+
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that range 2 wasn't written */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+
+ /* Check that range 4 was partially written */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);
+
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));
+
+ /* Nothing before param->from_reg should have been written */
+ for (i = 0; i < param->from_reg; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /* Mark dirty; a subsequent cache sync should not write anything. */
+ regcache_mark_dirty(map);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
}
static void cache_present(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -871,39 +1473,77 @@ static void cache_present(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
+ data->read[param->from_reg + i] = false;
/* No defaults so no registers cached. */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
/* We didn't trigger any reads */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, data->read[i]);
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
/* Fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Now everything should be cached */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
+}
+
+static void cache_write_zero(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+
+ /* No defaults so no registers cached. */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
+
+ /* We didn't trigger any reads */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
+
+ /* Write a zero value */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
+
+ /* Read that zero value back */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+ KUNIT_EXPECT_EQ(test, 0, val);
+
+ /* From the cache? */
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
- regmap_exit(map);
+ /* Try to throw it away */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
}
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -911,13 +1551,12 @@ static void cache_range_window_reg(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_window_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -953,41 +1592,29 @@ static void cache_range_window_reg(struct kunit *test)
KUNIT_ASSERT_EQ(test, val, 2);
}
-struct raw_test_types {
- const char *name;
-
- enum regcache_type cache_type;
- enum regmap_endian val_endian;
-};
-
-static void raw_to_desc(const struct raw_test_types *t, char *desc)
-{
- strcpy(desc, t->name);
-}
-
-static const struct raw_test_types raw_types_list[] = {
- { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
- { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_types_list[] = {
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
-static const struct raw_test_types raw_cache_types_list[] = {
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);
static const struct regmap_config raw_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
@@ -997,21 +1624,24 @@ static const struct regmap_config raw_regmap_config = {
.val_bits = 16,
};
-static struct regmap *gen_raw_regmap(struct regmap_config *config,
- struct raw_test_types *test_type,
+static struct regmap *gen_raw_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ struct regmap_test_priv *priv = test->priv;
+ const struct regmap_test_param *param = test->param_value;
u16 *buf;
- struct regmap *ret;
- size_t size = (config->max_register + 1) * config->reg_bits / 8;
- int i;
+ struct regmap *ret = ERR_PTR(-ENOMEM);
+ int i, error;
struct reg_default *defaults;
+ size_t size;
- config->cache_type = test_type->cache_type;
- config->val_format_endian = test_type->val_endian;
+ config->cache_type = param->cache;
+ config->val_format_endian = param->val_endian;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
+ size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits));
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -1020,20 +1650,21 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data))
- return ERR_PTR(-ENOMEM);
+ goto out_free;
(*data)->vals = (void *)buf;
config->num_reg_defaults = config->max_register + 1;
- defaults = kcalloc(config->num_reg_defaults,
- sizeof(struct reg_default),
- GFP_KERNEL);
+ defaults = kunit_kcalloc(test,
+ config->num_reg_defaults,
+ sizeof(struct reg_default),
+ GFP_KERNEL);
if (!defaults)
- return ERR_PTR(-ENOMEM);
+ goto out_free;
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i;
- switch (test_type->val_endian) {
+ switch (param->val_endian) {
case REGMAP_ENDIAN_LITTLE:
defaults[i].def = le16_to_cpu(buf[i]);
break;
@@ -1041,7 +1672,8 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
defaults[i].def = be16_to_cpu(buf[i]);
break;
default:
- return ERR_PTR(-EINVAL);
+ ret = ERR_PTR(-EINVAL);
+ goto out_free;
}
}
@@ -1052,18 +1684,26 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
if (config->cache_type == REGCACHE_NONE)
config->num_reg_defaults = 0;
- ret = regmap_init_raw_ram(config, *data);
- if (IS_ERR(ret)) {
- kfree(buf);
- kfree(*data);
- }
+ ret = regmap_init_raw_ram(priv->dev, config, *data);
+ if (IS_ERR(ret))
+ goto out_free;
+
+ /* On failure this runs regmap_exit() immediately, freeing buf and *data */
+ error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
+ if (error)
+ ret = ERR_PTR(error);
+
+ return ret;
+
+out_free:
+ kfree(buf);
+ kfree(*data);
return ret;
}
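The kunit_add_action_or_reset() call above ties the map's lifetime to the test: the action runs at test teardown, and if registering it fails the action is invoked immediately. This is why the explicit regmap_exit() calls disappear from the individual tests throughout this patch. regmap_exit_action() is defined earlier in the file and is presumably just:

static void regmap_exit_action(void *data)
{
	struct regmap *map = data;

	regmap_exit(map);
}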
static void raw_read_defaults_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1072,7 +1712,7 @@ static void raw_read_defaults_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1082,13 +1722,10 @@ static void raw_read_defaults_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
}
-
- regmap_exit(map);
}
static void raw_read_defaults(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1099,35 +1736,31 @@ static void raw_read_defaults(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
- val_len = sizeof(*rval) * (config.max_register + 1);
- rval = kmalloc(val_len, GFP_KERNEL);
+ val_len = array_size(sizeof(*rval), config.max_register + 1);
+ rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
return;
-
+
/* Check that we can read the defaults via the API */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
for (i = 0; i < config.max_register + 1; i++) {
def = config.reg_defaults[i].def;
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
- KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
} else {
- KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
}
}
-
- kfree(rval);
- regmap_exit(map);
}
static void raw_write_read_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1136,7 +1769,7 @@ static void raw_write_read_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1147,13 +1780,10 @@ static void raw_write_read_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
static void raw_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1164,7 +1794,7 @@ static void raw_write(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1185,10 +1815,10 @@ static void raw_write(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu((__force __be16)val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu((__force __le16)val[i % 2]));
}
break;
default:
@@ -1199,8 +1829,6 @@ static void raw_write(struct kunit *test)
/* The values should appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
-
- regmap_exit(map);
}
static bool reg_zero(struct device *dev, unsigned int reg)
@@ -1215,7 +1843,6 @@ static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
static void raw_noinc_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1228,7 +1855,7 @@ static void raw_noinc_write(struct kunit *test)
config.writeable_noinc_reg = reg_zero;
config.readable_noinc_reg = reg_zero;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1259,13 +1886,10 @@ static void raw_noinc_write(struct kunit *test)
/* Make sure we didn't touch the register after the noinc register */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
KUNIT_ASSERT_EQ(test, val_test, val);
-
- regmap_exit(map);
}
static void raw_sync(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1276,7 +1900,7 @@ static void raw_sync(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1300,10 +1924,10 @@ static void raw_sync(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i - 2]));
+ be16_to_cpu((__force __be16)val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i - 2]));
+ le16_to_cpu((__force __le16)val[i - 2]));
}
break;
case 4:
@@ -1323,7 +1947,7 @@ static void raw_sync(struct kunit *test)
val[2] = cpu_to_be16(val[2]);
else
val[2] = cpu_to_le16(val[2]);
-
+
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
@@ -1337,14 +1961,78 @@ static void raw_sync(struct kunit *test)
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
+}
- regmap_exit(map);
+static void raw_ranges(struct kunit *test)
+{
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = raw_regmap_config;
+ config.volatile_reg = test_range_all_volatile;
+ config.ranges = &test_range;
+ config.num_ranges = 1;
+ config.max_register = test_range.range_max;
+
+ map = gen_raw_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Reset the page to a non-zero value to trigger a change */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
+ test_range.range_max));
+
+ /* Check we set the page and use the window for writes */
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
+ test_range.range_min +
+ test_range.window_len,
+ 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ /* Same for reads */
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
+ test_range.range_min +
+ test_range.window_len,
+ &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ /* No physical access triggered in the virtual range */
+ for (i = test_range.range_min; i < test_range.range_max; i++) {
+ KUNIT_EXPECT_FALSE(test, data->read[i]);
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+ }
}
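test_range itself is defined earlier in the file. For orientation, a paged-access window is described by a struct regmap_range_cfg along these lines (the numbers here are illustrative, not the file's actual values):

static const struct regmap_range_cfg example_range = {
	.name = "example",
	.range_min = 0x40,	/* virtual registers 0x40..0x7f */
	.range_max = 0x7f,
	.selector_reg = 0x00,	/* page-select register */
	.selector_mask = 0x03,
	.selector_shift = 0,
	.window_start = 0x10,	/* physical window of 16 registers */
	.window_len = 0x10,
};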
static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
@@ -1354,13 +2042,20 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
@@ -1368,13 +2063,47 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
+ KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
{}
};
+static int regmap_test_init(struct kunit *test)
+{
+ struct regmap_test_priv *priv;
+ struct device *dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ test->priv = priv;
+
+ dev = kunit_device_register(test, "regmap_test");
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ priv->dev = get_device(dev);
+ dev_set_drvdata(dev, test);
+
+ return 0;
+}
+
+static void regmap_test_exit(struct kunit *test)
+{
+ struct regmap_test_priv *priv = test->priv;
+
+ /* Destroy the dummy struct device */
+ if (priv && priv->dev)
+ put_device(priv->dev);
+}
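The init/exit pair balances references rather than registering the device twice; schematically:

/*
 * init:  priv->dev = get_device(kunit_device_register(test, "regmap_test"));
 *        - KUnit unregisters the device automatically at teardown.
 *        - get_device() keeps the struct device itself pinned meanwhile.
 * exit:  put_device(priv->dev);   balances the get_device() above.
 */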
+
static struct kunit_suite regmap_test_suite = {
.name = "regmap",
+ .init = regmap_test_init,
+ .exit = regmap_test_exit,
.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
+MODULE_DESCRIPTION("Regmap KUnit tests");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index 6aa6a2409478..9573bf3b52f4 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -117,5 +117,5 @@ struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
-MODULE_DESCRIPTION("Regmap MDIO Module");
+MODULE_DESCRIPTION("regmap MDIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 192d6b131dff..4e5b4518ce4d 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -53,7 +53,8 @@ static const struct regmap_bus regmap_ram = {
.free_context = regmap_ram_free_context,
};
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -75,11 +76,12 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
if (!data->written)
return ERR_PTR(-ENOMEM);
- map = __regmap_init(NULL, &regmap_ram, data, config,
+ map = __regmap_init(dev, &regmap_ram, data, config,
lock_key, lock_name);
return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_ram);
+MODULE_DESCRIPTION("Register map access API - Memory region");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index 93ae07b503fd..76c98814fb8a 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -107,7 +107,8 @@ static const struct regmap_bus regmap_raw_ram = {
.free_context = regmap_raw_ram_free_context,
};
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -134,11 +135,12 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
data->reg_endian = config->reg_format_endian;
- map = __regmap_init(NULL, &regmap_raw_ram, data, config,
+ map = __regmap_init(dev, &regmap_raw_ram, data, config,
lock_key, lock_name);
return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_raw_ram);
+MODULE_DESCRIPTION("Register map access API - Memory region with raw access");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
index 986af26d88c2..12bbbb03e5f2 100644
--- a/drivers/base/regmap/regmap-sccb.c
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -125,4 +125,5 @@ struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
+MODULE_DESCRIPTION("Register map access API - SCCB support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 388c3a087bd9..86644bbd0710 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -1,45 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.
+#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
+#include <sound/sdca_function.h>
#include "internal.h"
+struct regmap_mbq_context {
+ struct device *dev;
+
+ struct regmap_sdw_mbq_cfg cfg;
+
+ int val_size;
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+};
+
+static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ int size = ctx->val_size;
+
+ if (ctx->cfg.mbq_size) {
+ size = ctx->cfg.mbq_size(ctx->dev, reg);
+ if (!size || size > ctx->val_size)
+ return -EINVAL;
+ }
+
+ return size;
+}
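The ->mbq_size() hook lets a device report per-register widths narrower than the regmap's val_bits. A hypothetical callback (register address and names invented):

#define EXAMPLE_VOLUME_CTL	0x2000	/* invented register address */

static int example_mbq_size(struct device *dev, unsigned int reg)
{
	return reg == EXAMPLE_VOLUME_CTL ? 2 : 1;	/* bytes, at most val_size */
}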
+
+static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ if (ctx->cfg.deferrable)
+ return ctx->cfg.deferrable(ctx->dev, reg);
+
+ return false;
+}
+
+static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
+ struct regmap_mbq_context *ctx)
+{
+ struct device *dev = &slave->dev;
+ int val, ret = 0;
+
+ dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
+
+ reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
+ SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
+
+ if (ctx->readable_reg(dev, reg)) {
+ ret = read_poll_timeout(sdw_read_no_pm, val,
+ val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
+ ctx->cfg.timeout_us, ctx->cfg.retry_us,
+ false, slave, reg);
+ if (val < 0)
+ return val;
+ if (ret)
+ dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
+ } else {
+ fsleep(ctx->cfg.timeout_us);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int val,
+ int mbq_size, bool deferrable)
+{
+ int shift = mbq_size * BITS_PER_BYTE;
+ int ret;
+
+ while (--mbq_size > 0) {
+ shift -= BITS_PER_BYTE;
+
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
+ (val >> shift) & 0xff);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = sdw_write_no_pm(slave, reg, val & 0xff);
+ if (deferrable && ret == -ENODATA)
+ return -EAGAIN;
+
+ return ret;
+}
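Worked trace of the helper above for mbq_size = 3 and val = 0x123456: the upper bytes go to the MBQ control address, and the final write to reg itself completes the transfer:

	sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), 0x12);	/* bits 23..16 */
	sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), 0x34);	/* bits 15..8  */
	sdw_write_no_pm(slave, reg, 0x56);			/* bits 7..0   */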
+
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
- ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
- if (ret < 0)
- return ret;
+ if (mbq_size < 0)
+ return mbq_size;
+
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int *val,
+ int mbq_size, bool deferrable)
+{
+ int shift = BITS_PER_BYTE;
+ int read;
+
+ read = sdw_read_no_pm(slave, reg);
+ if (read < 0) {
+ if (deferrable && read == -ENODATA)
+ return -EAGAIN;
+
+ return read;
+ }
+
+ *val = read;
+
+ while (--mbq_size > 0) {
+ read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
+ if (read < 0)
+ return read;
+
+ *val |= read << shift;
+ shift += BITS_PER_BYTE;
+ }
- return sdw_write_no_pm(slave, reg, val & 0xff);
+ return 0;
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- int read0;
- int read1;
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
+ int ret;
- read0 = sdw_read_no_pm(slave, reg);
- if (read0 < 0)
- return read0;
+ if (mbq_size < 0)
+ return mbq_size;
- read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
- if (read1 < 0)
- return read1;
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
- *val = (read1 << 8) | read0;
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
+ }
- return 0;
+ return ret;
}
static const struct regmap_bus regmap_sdw_mbq = {
@@ -51,8 +193,7 @@ static const struct regmap_bus regmap_sdw_mbq = {
static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
- /* MBQ-based controls are only 16-bits for now */
- if (config->val_bits != 16)
+ if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
return -ENOTSUPP;
/* Registers are 32 bits wide */
@@ -65,37 +206,71 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
return 0;
}
+static struct regmap_mbq_context *
+regmap_sdw_mbq_gen_context(struct device *dev,
+ const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config)
+{
+ struct regmap_mbq_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+
+ if (mbq_config)
+ ctx->cfg = *mbq_config;
+
+ ctx->val_size = config->val_bits / BITS_PER_BYTE;
+ ctx->readable_reg = config->readable_reg;
+
+ return ctx;
+}
+
struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
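A hypothetical codec could then wire the new hooks up via the mbq_config argument (callback names from the sketches above; timeout values invented):

static bool example_deferrable(struct device *dev, unsigned int reg)
{
	return reg == EXAMPLE_VOLUME_CTL;	/* only this control may report busy */
}

static const struct regmap_sdw_mbq_cfg example_mbq_cfg = {
	.mbq_size   = example_mbq_size,
	.deferrable = example_deferrable,
	.timeout_us = 10000,	/* busy-poll parameters consumed by */
	.retry_us   = 100,	/* regmap_sdw_mbq_poll_busy() above  */
};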
-MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
+MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 159c0b740b00..ea631ac7c7ec 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -98,5 +98,5 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
-MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_DESCRIPTION("regmap SoundWire Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 8075db788b39..54eb7d227cf4 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -68,4 +68,5 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
+MODULE_DESCRIPTION("Register map access API - SLIMbus support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
index 4c2b94b3e30b..d86a06cadcdb 100644
--- a/drivers/base/regmap/regmap-spi-avmm.c
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -710,4 +710,5 @@ struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
+MODULE_DESCRIPTION("Register map access API - SPI AVMM support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 37ab23a9d034..14b1d88997cb 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
return ERR_PTR(-ENOMEM);
max_msg_size = spi_max_message_size(spi);
- reg_reserve_size = config->reg_bits / BITS_PER_BYTE
- + config->pad_bits / BITS_PER_BYTE;
+ reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_size + reg_reserve_size > max_msg_size)
max_size -= reg_reserve_size;
@@ -165,4 +164,5 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
+MODULE_DESCRIPTION("regmap SPI Module");
MODULE_LICENSE("GPL");
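Note: the reg_reserve_size change above is an integer-truncation fix: dividing
reg_bits and pad_bits by BITS_PER_BYTE separately rounds each term down, so the
reserved header is under-counted whenever neither field is byte-aligned on its
own. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            int reg_bits = 12, pad_bits = 4;    /* 16 bits = 2 bytes total */

            printf("%d\n", reg_bits / 8 + pad_bits / 8); /* old: 1, too small */
            printf("%d\n", (reg_bits + pad_bits) / 8);   /* new: 2, correct   */
            return 0;
    }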
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index cdf12d2aa3a1..347bfe9544ce 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -222,4 +222,5 @@ struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
+MODULE_DESCRIPTION("Register map access API - SPMI support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 3a8b402db852..29fd24f9c7ed 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -234,4 +234,5 @@ struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
+MODULE_DESCRIPTION("Register map access API - W1 (1-Wire) support");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6db77d8e45f9..f2843f814675 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -17,7 +17,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(regmap_check_range_table);
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->writeable_reg)
@@ -112,7 +112,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
if (!map->cache_ops)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
map->lock(map->lock_arg);
@@ -129,7 +129,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
if (!map->reg_read)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->format.format_write)
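Note: these predicates previously treated max_register == 0 as "no limit
configured", which made it impossible to bound a map whose only valid register
is address 0. The new max_register_is_set flag (derived from the config, as the
initialisation further down shows) removes that ambiguity. A minimal sketch of
a single-register device using the max_register_is_0 config field visible in
this diff (foo_cfg is illustrative):

    static const struct regmap_config foo_cfg = {
            .reg_bits          = 8,
            .val_bits          = 8,
            .max_register      = 0,    /* register 0 is the only register...  */
            .max_register_is_0 = true, /* ...and the bound really is in force */
    };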
@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
+static int dev_get_regmap_match(struct device *dev, void *res, void *data);
+
+static int regmap_detach_dev(struct device *dev, struct regmap *map)
+{
+ if (!dev)
+ return 0;
+
+ return devres_release(dev, dev_get_regmap_release,
+ dev_get_regmap_match, (void *)map->name);
+}
+
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
const struct regmap_config *config)
{
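Note: regmap_attach_dev() registers a devres entry so dev_get_regmap() can find
the map later; regmap_detach_dev() releases exactly that entry and, as the
regmap_exit() hunk below shows, is now called on teardown so a lookup can no
longer hand back a freed map. Roughly the lifetime this guards, sketched with
an illustrative foo_cfg:

    map = regmap_init_i2c(i2c, &foo_cfg);  /* attaches the map to i2c->dev */
    /* ... use the map ... */
    regmap_exit(map);                      /* now also detaches it         */

    /* Without the detach, this lookup could return the stale pointer: */
    WARN_ON(dev_get_regmap(&i2c->dev, NULL) != NULL);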
@@ -745,6 +756,7 @@ struct regmap *__regmap_init(struct device *dev,
lock_key, lock_name);
}
map->lock_arg = map;
+ map->lock_key = lock_key;
}
/*
@@ -757,14 +769,13 @@ struct regmap *__regmap_init(struct device *dev,
map->alloc_flags = GFP_KERNEL;
map->reg_base = config->reg_base;
+ map->reg_shift = config->pad_bits % 8;
- map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.reg_shift = config->reg_shift;
- map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
- config->val_bits + config->pad_bits, 8);
- map->reg_shift = config->pad_bits % 8;
+ map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
+ map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
+ map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
if (config->reg_stride)
map->reg_stride = config->reg_stride;
else
@@ -787,6 +798,7 @@ struct regmap *__regmap_init(struct device *dev,
map->bus = bus;
map->bus_context = bus_context;
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->wr_table = config->wr_table;
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
@@ -1050,13 +1062,13 @@ skip_format_initialization:
/* Sanity check */
if (range_cfg->range_max < range_cfg->range_min) {
- dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
range_cfg->range_max, range_cfg->range_min);
goto err_range;
}
if (range_cfg->range_max > map->max_register) {
- dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
range_cfg->range_max, map->max_register);
goto err_range;
}
@@ -1412,6 +1424,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
regmap_debugfs_exit(map);
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
@@ -1442,7 +1455,9 @@ void regmap_exit(struct regmap *map)
{
struct regmap_async *async;
+ regmap_detach_dev(map->dev, map);
regcache_exit(map);
+
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
@@ -2345,7 +2360,7 @@ out:
} else {
void *wval;
- wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
+ wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
if (!wval)
return -ENOMEM;
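Note: kmemdup_array() is the overflow-aware form of the replaced call: like
kmalloc_array(), it checks the count * size multiplication rather than letting
it silently wrap. The two lines are otherwise equivalent:

    /* Before: unchecked multiplication. */
    wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
    /* After: fails cleanly if val_count * val_bytes would overflow. */
    wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);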
@@ -2837,6 +2852,43 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
EXPORT_SYMBOL_GPL(regmap_read);
/**
+ * regmap_read_bypassed() - Read a value from a single register directly
+ * from the device, bypassing the cache
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+ int ret;
+ bool bypass, cache_only;
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+ cache_only = map->cache_only;
+ map->cache_bypass = true;
+ map->cache_only = false;
+
+ ret = _regmap_read(map, reg, val);
+
+ map->cache_bypass = bypass;
+ map->cache_only = cache_only;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read_bypassed);
+
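Note: regmap_read_bypassed() is for the narrow window where the hardware is
reachable but the cache must be neither consulted nor updated, e.g. probing a
device identity while cache_only is still set during early resume. A minimal
usage sketch (FOO_DEVID is a placeholder register):

    unsigned int devid;
    int ret;

    ret = regmap_read_bypassed(map, FOO_DEVID, &devid);
    if (ret)
            return ret;   /* cache_bypass/cache_only are restored either way */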
+/**
* regmap_raw_read() - Read raw data from the device
*
* @map: Register map to read from
@@ -3062,8 +3114,53 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
+static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
+ const unsigned int *regs, void *val, size_t val_count)
+{
+ u32 *u32 = val;
+ u16 *u16 = val;
+ u8 *u8 = val;
+ int ret, i;
+
+ map->lock(map->lock_arg);
+
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
+
+ if (regs) {
+ if (!IS_ALIGNED(regs[i], map->reg_stride)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = _regmap_read(map, regs[i], &ival);
+ } else {
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
+ }
+ if (ret != 0)
+ goto out;
+
+ switch (map->format.val_bytes) {
+ case 4:
+ u32[i] = ival;
+ break;
+ case 2:
+ u16[i] = ival;
+ break;
+ case 1:
+ u8[i] = ival;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+
/**
- * regmap_bulk_read() - Read multiple registers from the device
+ * regmap_bulk_read() - Read multiple sequential registers from the device
*
* @map: Register map to read from
* @reg: First register to be read from
@@ -3093,47 +3190,35 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(val + i);
} else {
- u32 *u32 = val;
- u16 *u16 = val;
- u8 *u8 = val;
-
- map->lock(map->lock_arg);
-
- for (i = 0; i < val_count; i++) {
- unsigned int ival;
-
- ret = _regmap_read(map, reg + regmap_get_offset(map, i),
- &ival);
- if (ret != 0)
- goto out;
-
- switch (map->format.val_bytes) {
- case 4:
- u32[i] = ival;
- break;
- case 2:
- u16[i] = ival;
- break;
- case 1:
- u8[i] = ival;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- }
-
-out:
- map->unlock(map->lock_arg);
+ ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
}
-
if (!ret)
trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
-
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
+/**
+ * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
+ *
+ * @map: Register map to read from
+ * @regs: Array of registers to read from
+ * @val: Pointer to store read values, in the device's native register size
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
+ size_t val_count)
+{
+ if (val_count == 0)
+ return -EINVAL;
+
+ return _regmap_bulk_read(map, 0, regs, val, val_count);
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_read);
+
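Note: unlike regmap_bulk_read(), which walks a contiguous range from a base
register, regmap_multi_reg_read() takes an explicit array of possibly scattered
addresses; val must still be sized in the map's native register width. A sketch
for a map with 16-bit values (register numbers are placeholders):

    static const unsigned int foo_regs[] = { 0x04, 0x10, 0x2c };
    u16 vals[ARRAY_SIZE(foo_regs)];     /* val_bits = 16 -> u16 storage */
    int ret;

    ret = regmap_multi_reg_read(map, foo_regs, vals, ARRAY_SIZE(foo_regs));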
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool force_write)
@@ -3383,7 +3468,7 @@ EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
*/
int regmap_get_max_register(struct regmap *map)
{
- return map->max_register ? map->max_register : -EINVAL;
+ return map->max_register_is_set ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);
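Note: callers should keep treating a negative return as "no maximum
configured"; with the flag, a legitimate maximum of 0 is no longer folded into
that error case:

    int max = regmap_get_max_register(map);

    if (max < 0)
            return max;   /* map was created without a limit */
    /* max may now legitimately be 0 for a single-register map */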
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
index 704e106e5dbd..bcc5a8b226a6 100644
--- a/drivers/base/regmap/trace.h
+++ b/drivers/base/regmap/trace.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(regmap_reg,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->val = val;
),
@@ -74,7 +74,7 @@ DECLARE_EVENT_CLASS(regmap_bulk,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->val_len = val_len;
memcpy(__get_dynamic_array(buf), val, val_len);
@@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(regmap_block,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->count = count;
),
@@ -163,9 +163,9 @@ TRACE_EVENT(regcache_sync,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
- __assign_str(status, status);
- __assign_str(type, type);
+ __assign_str(name);
+ __assign_str(status);
+ __assign_str(type);
),
TP_printk("%s type=%s status=%s", __get_str(name),
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(regmap_bool,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->flag = flag;
),
@@ -216,7 +216,7 @@ DECLARE_EVENT_CLASS(regmap_async,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
),
TP_printk("%s", __get_str(name))
@@ -264,7 +264,7 @@ TRACE_EVENT(regcache_drop_region,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->from = from;
__entry->to = to;
),
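Note: these are mechanical conversions to the newer tracing API, in which
__assign_str() takes only the field name and fetches the source string from the
matching __string() declaration (the dropped second argument had to match it
anyway). The resulting pattern, roughly:

    TP_STRUCT__entry(
            __string(name, regmap_name(map))   /* source recorded here...  */
    ),
    TP_fast_assign(
            __assign_str(name);                /* ...so it is not repeated */
    ),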
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 36512fb75a20..b1726a3515f6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -6,10 +6,21 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include "base.h"
@@ -666,6 +677,7 @@ static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
+ .property_read_bool = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
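Note: wiring .property_read_bool to the existing presence check gives software
nodes a dedicated boolean-read op. For a software node a boolean property is
simply one declared without a value, so presence and truth coincide; a sketch
(property and variable names are illustrative):

    static const struct property_entry foo_props[] = {
            PROPERTY_ENTRY_BOOL("wakeup-source"),
            { }
    };

    /* With the node registered and assigned to dev: */
    bool can_wake = device_property_read_bool(dev, "wakeup-source"); /* true */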
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 5c7fac80611c..2756870615cc 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE
config DM_KUNIT_TEST
tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
config DRIVER_PE_KUNIT_TEST
tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c
index ea05b8785743..6355a2231b74 100644
--- a/drivers/base/test/platform-device-test.c
+++ b/drivers/base/test/platform-device-test.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/platform_device.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define DEVICE_NAME "test"
@@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = {
.test_cases = platform_device_devm_tests,
};
-kunit_test_suite(platform_device_devm_test_suite);
+static void platform_device_find_by_null_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = kunit_platform_device_add(test, pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_of_node(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL);
+
+ KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL));
+}
+
+static struct kunit_case platform_device_match_tests[] = {
+ KUNIT_CASE(platform_device_find_by_null_test),
+ {}
+};
+
+static struct kunit_suite platform_device_match_test_suite = {
+ .name = "platform-device-match",
+ .test_cases = platform_device_match_tests,
+};
+
+kunit_test_suites(
+ &platform_device_devm_test_suite,
+ &platform_device_match_test_suite,
+);
MODULE_DESCRIPTION("Test module for platform devices");
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
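Note: both suites are now registered through a single kunit_test_suites() call;
with a kunit.py-driven build they can still be run individually via the usual
suite filter, e.g. tools/testing/kunit/kunit.py run 'platform-device-match'.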
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 89f98be5c5b9..b962da263eee 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,23 +23,39 @@ static ssize_t name##_show(struct device *dev, \
#define define_siblings_read_func(name, mask) \
static ssize_t name##_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
\
- return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
+ \
+ return n; \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
+ \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_list_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
\
- return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ return n; \
}
define_id_show_func(physical_package_id, "%d");
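Note: the reworked helpers copy the topology mask into a locally allocated
cpumask before formatting, so cpumap_print_{bitmask,list}_to_buf() operates on
a stable snapshot even if a concurrent CPU hot(un)plug updates the source mask
mid-read. For one attribute, the first helper now expands to roughly:

    static ssize_t core_cpus_read(struct file *file, struct kobject *kobj,
                                  const struct bin_attribute *attr, char *buf,
                                  loff_t off, size_t count)
    {
            struct device *dev = kobj_to_dev(kobj);
            cpumask_var_t mask;
            ssize_t n;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(mask, topology_sibling_cpumask(dev->id));
            n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
            free_cpumask_var(mask);

            return n;
    }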
@@ -62,50 +78,50 @@ define_id_show_func(ppin, "0x%llx");
static DEVICE_ATTR_ADMIN_RO(ppin);
define_siblings_read_func(thread_siblings, sibling_cpumask);
-static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_cpus, sibling_cpumask);
-static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_siblings, core_cpumask);
-static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
-static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
-static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
define_siblings_read_func(package_cpus, core_cpumask);
-static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
-static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
-static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_core_cpus,
&bin_attr_core_cpus_list,
&bin_attr_thread_siblings,
@@ -163,7 +179,7 @@ static umode_t topology_is_visible(struct kobject *kobj,
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
- .bin_attrs = bin_attrs,
+ .bin_attrs_new = bin_attrs,
.is_visible = topology_is_visible,
.name = "topology"
};
diff --git a/drivers/base/trace.h b/drivers/base/trace.h
index 3192e18f877e..3b83b13a57ff 100644
--- a/drivers/base/trace.h
+++ b/drivers/base/trace.h
@@ -24,18 +24,18 @@ DECLARE_EVENT_CLASS(devres,
__field(struct device *, dev)
__field(const char *, op)
__field(void *, node)
- __field(const char *, name)
+ __string(name, name)
__field(size_t, size)
),
TP_fast_assign(
- __assign_str(devname, dev_name(dev));
+ __assign_str(devname);
__entry->op = op;
__entry->node = node;
- __entry->name = name;
+ __assign_str(name);
__entry->size = size;
),
TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname),
- __entry->op, __entry->node, __entry->name, __entry->size)
+ __entry->op, __entry->node, __get_str(name), __entry->size)
);
DEFINE_EVENT(devres, devres_log,