Diffstat (limited to 'drivers/base/devres.c')
-rw-r--r--	drivers/base/devres.c	659
1 file changed, 558 insertions(+), 101 deletions(-)
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 507379e7b763..f54db6d138ab 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1,31 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/devres.c - device resource management
*
* Copyright (c) 2006 SUSE Linux Products GmbH
* Copyright (c) 2006 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/percpu.h>
+
+#include <asm/sections.h>
#include "base.h"
+#include "trace.h"
struct devres_node {
struct list_head entry;
dr_release_t release;
-#ifdef CONFIG_DEBUG_DEVRES
const char *name;
size_t size;
-#endif
};
struct devres {
struct devres_node node;
- /* -- 3 pointers */
- unsigned long long data[]; /* guarantee ull alignment */
+ /*
+ * Some archs want to perform DMA into kmalloc caches
+ * and need a guaranteed alignment larger than
+ * the alignment of a 64-bit integer.
+ * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
+ * alignment for struct devres when allocated by kmalloc().
+ */
+ u8 __aligned(ARCH_DMA_MINALIGN) data[];
};
struct devres_group {
@@ -35,10 +42,6 @@ struct devres_group {
/* -- 8 pointers */
};
-#ifdef CONFIG_DEBUG_DEVRES
-static int log_devres = 0;
-module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
-
static void set_node_dbginfo(struct devres_node *node, const char *name,
size_t size)
{
@@ -46,18 +49,28 @@ static void set_node_dbginfo(struct devres_node *node, const char *name,
node->size = size;
}
-static void devres_log(struct device *dev, struct devres_node *node,
+#ifdef CONFIG_DEBUG_DEVRES
+static int log_devres = 0;
+module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
+
+static void devres_dbg(struct device *dev, struct devres_node *node,
const char *op)
{
if (unlikely(log_devres))
- dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
- op, node, node->name, (unsigned long)node->size);
+ dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
+ op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
-#define set_node_dbginfo(node, n, s) do {} while (0)
-#define devres_log(dev, node, op) do {} while (0)
+#define devres_dbg(dev, node, op) do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
+static void devres_log(struct device *dev, struct devres_node *node,
+ const char *op)
+{
+ trace_devres_log(dev, op, node, node->name, node->size);
+ devres_dbg(dev, node, op);
+}
+
/*
* Release functions for devres group. These callbacks are used only
* for identification.
@@ -72,7 +85,7 @@ static void group_close_release(struct device *dev, void *res)
/* noop */
}
-static struct devres_group * node_to_group(struct devres_node *node)
+static struct devres_group *node_to_group(struct devres_node *node)
{
if (node->release == &group_open_release)
return container_of(node, struct devres_group, node[0]);
@@ -81,17 +94,36 @@ static struct devres_group * node_to_group(struct devres_node *node)
return NULL;
}
-static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp)
+static bool check_dr_size(size_t size, size_t *tot_size)
+{
+ /* We must catch any near-SIZE_MAX cases that could overflow. */
+ if (unlikely(check_add_overflow(sizeof(struct devres),
+ size, tot_size)))
+ return false;
+
+ /* Actually allocate the full kmalloc bucket size. */
+ *tot_size = kmalloc_size_roundup(*tot_size);
+
+ return true;
+}
+
+static __always_inline struct devres *alloc_dr(dr_release_t release,
+ size_t size, gfp_t gfp, int nid)
{
- size_t tot_size = sizeof(struct devres) + size;
+ size_t tot_size;
struct devres *dr;
- dr = kmalloc_track_caller(tot_size, gfp);
+ if (!check_dr_size(size, &tot_size))
+ return NULL;
+
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
if (unlikely(!dr))
return NULL;
- memset(dr, 0, tot_size);
+ /* No need to clear memory twice */
+ if (!(gfp & __GFP_ZERO))
+ memset(dr, 0, offsetof(struct devres, data));
+
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
return dr;
@@ -104,25 +136,21 @@ static void add_dr(struct device *dev, struct devres_node *node)
list_add_tail(&node->entry, &dev->devres_head);
}
-#ifdef CONFIG_DEBUG_DEVRES
-void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
- const char *name)
+static void replace_dr(struct device *dev,
+ struct devres_node *old, struct devres_node *new)
{
- struct devres *dr;
-
- dr = alloc_dr(release, size, gfp);
- if (unlikely(!dr))
- return NULL;
- set_node_dbginfo(&dr->node, name, size);
- return dr->data;
+ devres_log(dev, old, "REPLACE");
+ BUG_ON(!list_empty(&new->entry));
+ list_replace(&old->entry, &new->entry);
}
-EXPORT_SYMBOL_GPL(__devres_alloc);
-#else
+
/**
- * devres_alloc - Allocate device resource data
+ * __devres_alloc_node - Allocate device resource data
* @release: Release function devres will be associated with
* @size: Allocation size
* @gfp: Allocation flags
+ * @nid: NUMA node
+ * @name: Name of the resource
*
* Allocate devres of @size bytes. The allocated area is zeroed, then
* associated with @release. The returned pointer can be passed to
@@ -131,17 +159,18 @@ EXPORT_SYMBOL_GPL(__devres_alloc);
* RETURNS:
* Pointer to allocated devres on success, NULL on failure.
*/
-void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
+ const char *name)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
+ set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
-EXPORT_SYMBOL_GPL(devres_alloc);
-#endif
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
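For illustration, a minimal consumer sketch: drivers normally reach
__devres_alloc_node() through the devres_alloc() wrapper, which supplies
NUMA_NO_NODE and the stringified release function as @name. The foo_* names
are hypothetical, not part of this patch:

        #include <linux/device.h>

        struct foo_state {                      /* hypothetical per-device state */
                int irq;
        };

        static void foo_state_release(struct device *dev, void *res)
        {
                /* runs automatically on driver detach, in reverse order */
                dev_dbg(dev, "foo_state released\n");
        }

        static int foo_init(struct device *dev, int irq)
        {
                struct foo_state *st;

                st = devres_alloc(foo_state_release, sizeof(*st), GFP_KERNEL);
                if (!st)
                        return -ENOMEM;

                st->irq = irq;
                devres_add(dev, st);            /* ownership passes to devres */
                return 0;
        }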
/**
* devres_for_each_res - Resource iterator
@@ -254,8 +283,8 @@ static struct devres *find_dr(struct device *dev, dr_release_t release,
* RETURNS:
* Pointer to found devres, NULL if not found.
*/
-void * devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -284,8 +313,8 @@ EXPORT_SYMBOL_GPL(devres_find);
* RETURNS:
* Pointer to found or added devres.
*/
-void * devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data)
+void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data)
{
struct devres *new_dr = container_of(new_res, struct devres, data);
struct devres *dr;
@@ -296,10 +325,10 @@ void * devres_get(struct device *dev, void *new_res,
if (!dr) {
add_dr(dev, &new_dr->node);
dr = new_dr;
- new_dr = NULL;
+ new_res = NULL;
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
- devres_free(new_dr);
+ devres_free(new_res);
return dr->data;
}
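A hedged sketch of the devres_get() pattern: allocate a candidate entry up
front, then either adopt it or fall back to an existing match (in which case
the candidate is freed). All foo_* identifiers are hypothetical:

        static void foo_counter_release(struct device *dev, void *res)
        {
                /* nothing to tear down; the memory itself is devres-managed */
        }

        static int foo_counter_match(struct device *dev, void *res, void *data)
        {
                return 1;       /* at most one instance exists; any hit matches */
        }

        static atomic_t *foo_get_counter(struct device *dev)
        {
                atomic_t *new_cnt;

                new_cnt = devres_alloc(foo_counter_release, sizeof(*new_cnt),
                                       GFP_KERNEL);
                if (!new_cnt)
                        return NULL;
                atomic_set(new_cnt, 0);

                /* returns an existing entry (freeing new_cnt), or adds new_cnt */
                return devres_get(dev, new_cnt, foo_counter_match, NULL);
        }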
@@ -320,8 +349,8 @@ EXPORT_SYMBOL_GPL(devres_get);
* RETURNS:
* Pointer to removed devres on success, NULL if not found.
*/
-void * devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data)
+void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
@@ -407,20 +436,16 @@ static int remove_nodes(struct device *dev,
struct list_head *first, struct list_head *end,
struct list_head *todo)
{
+ struct devres_node *node, *n;
int cnt = 0, nr_groups = 0;
- struct list_head *cur;
/* First pass - move normal devres entries to @todo and clear
* devres_group colors.
*/
- cur = first;
- while (cur != end) {
- struct devres_node *node;
+ node = list_entry(first, struct devres_node, entry);
+ list_for_each_entry_safe_from(node, n, end, entry) {
struct devres_group *grp;
- node = list_entry(cur, struct devres_node, entry);
- cur = cur->next;
-
grp = node_to_group(node);
if (grp) {
/* clear color of group markers in the first pass */
@@ -440,18 +465,14 @@ static int remove_nodes(struct device *dev,
/* Second pass - Scan groups and color them. A group gets
* color value of two iff the group is wholly contained in
- * [cur, end). That is, for a closed group, both opening and
- * closing markers should be in the range, while just the
+ * [current node, end). That is, for a closed group, both opening
+ * and closing markers should be in the range, while just the
* opening marker is enough for an open group.
*/
- cur = first;
- while (cur != end) {
- struct devres_node *node;
+ node = list_entry(first, struct devres_node, entry);
+ list_for_each_entry_safe_from(node, n, end, entry) {
struct devres_group *grp;
- node = list_entry(cur, struct devres_node, entry);
- cur = cur->next;
-
grp = node_to_group(node);
BUG_ON(!grp || list_empty(&grp->node[0].entry));
@@ -461,7 +482,7 @@ static int remove_nodes(struct device *dev,
BUG_ON(grp->color <= 0 || grp->color > 2);
if (grp->color == 2) {
- /* No need to update cur or end. The removed
+ /* No need to update current node or end. The removed
* nodes are always before both.
*/
list_move_tail(&grp->node[0].entry, todo);
@@ -472,28 +493,18 @@ static int remove_nodes(struct device *dev,
return cnt;
}
-static int release_nodes(struct device *dev, struct list_head *first,
- struct list_head *end, unsigned long flags)
- __releases(&dev->devres_lock)
+static void release_nodes(struct device *dev, struct list_head *todo)
{
- LIST_HEAD(todo);
- int cnt;
struct devres *dr, *tmp;
- cnt = remove_nodes(dev, first, end, &todo);
-
- spin_unlock_irqrestore(&dev->devres_lock, flags);
-
/* Release. Note that both devres and devres_group are
* handled as devres in the following loop. This is safe.
*/
- list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
+ list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
devres_log(dev, &dr->node, "REL");
dr->node.release(dev, dr->data);
kfree(dr);
}
-
- return cnt;
}
/**
@@ -506,13 +517,23 @@ static int release_nodes(struct device *dev, struct list_head *first,
int devres_release_all(struct device *dev)
{
unsigned long flags;
+ LIST_HEAD(todo);
+ int cnt;
/* Looks like an uninitialized device structure */
if (WARN_ON(dev->devres_head.next == NULL))
return -ENODEV;
+
+ /* Nothing to release if list is empty */
+ if (list_empty(&dev->devres_head))
+ return 0;
+
spin_lock_irqsave(&dev->devres_lock, flags);
- return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
- flags);
+ cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ release_nodes(dev, &todo);
+ return cnt;
}
/**
@@ -528,7 +549,7 @@ int devres_release_all(struct device *dev)
* RETURNS:
* ID of the new group, NULL on failure.
*/
-void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
struct devres_group *grp;
unsigned long flags;
@@ -546,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
grp->id = grp;
if (id)
grp->id = id;
+ grp->color = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
@@ -554,8 +576,11 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devres_open_group);
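A short sketch of the group API as used from a hypothetical probe path (not
part of this patch): everything registered between open and close can be
rolled back as one unit.

        static int foo_probe_step(struct device *dev)
        {
                void *grp;
                u8 *buf;

                grp = devres_open_group(dev, NULL, GFP_KERNEL);
                if (!grp)
                        return -ENOMEM;

                buf = devm_kzalloc(dev, 4096, GFP_KERNEL);
                if (!buf) {
                        /* release every resource added since the open */
                        devres_release_group(dev, grp);
                        return -ENOMEM;
                }

                devres_close_group(dev, grp);   /* keep the group's contents */
                return 0;
        }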
-/* Find devres group with ID @id. If @id is NULL, look for the latest. */
-static struct devres_group * find_group(struct device *dev, void *id)
+/*
+ * Find devres group with ID @id. If @id is NULL, look for the latest open
+ * group.
+ */
+static struct devres_group *find_group(struct device *dev, void *id)
{
struct devres_node *node;
@@ -648,6 +673,7 @@ int devres_release_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
+ LIST_HEAD(todo);
int cnt = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
@@ -660,7 +686,17 @@ int devres_release_group(struct device *dev, void *id)
if (!list_empty(&grp->node[1].entry))
end = grp->node[1].entry.next;
- cnt = release_nodes(dev, first, end, flags);
+ cnt = remove_nodes(dev, first, end, &todo);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ release_nodes(dev, &todo);
+ } else if (list_empty(&dev->devres_head)) {
+ /*
+ * dev is probably dying via devres_release_all(): groups
+	 * have already been removed and are in the process of
+ * being released - don't touch and don't warn.
+ */
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
@@ -672,7 +708,7 @@ EXPORT_SYMBOL_GPL(devres_release_group);
/*
* Custom devres actions allow inserting a simple function call
- * into the teadown sequence.
+ * into the teardown sequence.
*/
struct action_devres {
@@ -697,20 +733,21 @@ static void devm_action_release(struct device *dev, void *res)
}
/**
- * devm_add_action() - add a custom action to list of managed resources
+ * __devm_add_action() - add a custom action to list of managed resources
* @dev: Device that owns the action
* @action: Function that should be called
* @data: Pointer to data passed to @action implementation
+ * @name: Name of the resource (for debugging purposes)
*
* This adds a custom action to the list of managed resources so that
* it gets executed as part of standard resource unwinding.
*/
-int devm_add_action(struct device *dev, void (*action)(void *), void *data)
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
struct action_devres *devres;
- devres = devres_alloc(devm_action_release,
- sizeof(struct action_devres), GFP_KERNEL);
+ devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
+ GFP_KERNEL, NUMA_NO_NODE, name);
if (!devres)
return -ENOMEM;
@@ -720,83 +757,503 @@ int devm_add_action(struct device *dev, void (*action)(void *), void *data)
devres_add(dev, devres);
return 0;
}
-EXPORT_SYMBOL_GPL(devm_add_action);
+EXPORT_SYMBOL_GPL(__devm_add_action);
+
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ return devres_find(dev, devm_action_release, devm_action_match, &devres);
+}
+EXPORT_SYMBOL_GPL(devm_is_action_added);
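Illustrative use of the action API via the devm_add_action_or_reset() wrapper,
which runs the action immediately if registration fails; struct foo_hw and its
register are hypothetical:

        #include <linux/io.h>

        struct foo_hw {
                void __iomem *enable_reg;       /* hypothetical */
        };

        static void foo_hw_disable(void *data)
        {
                struct foo_hw *hw = data;

                writel(0, hw->enable_reg);
        }

        static int foo_hw_enable(struct device *dev, struct foo_hw *hw)
        {
                writel(1, hw->enable_reg);

                /* foo_hw_disable(hw) runs on detach, or right away on failure */
                return devm_add_action_or_reset(dev, foo_hw_disable, hw);
        }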
/**
- * devm_remove_action() - removes previously added custom action
+ * devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
+ *
+ * In contrast to devm_remove_action(), this function does not WARN() if no
+ * matching entry is found.
+ *
+ * This should only be used if the action is contained in an object with
+ * independent lifetime management, e.g. the Devres Rust abstraction.
+ *
+ * Causing the warning from regular driver code most likely indicates an abuse
+ * of the devres API.
+ *
+ * Returns: 0 on success, -ENOENT if no matching entry was found.
+ */
+int devm_remove_action_nowarn(struct device *dev,
+ void (*action)(void *),
+ void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ return devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres);
+}
+EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
+
+/**
+ * devm_release_action() - release previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Releases and removes instance of @action previously added by
+ * devm_add_action(). Both action and data should match one of the
+ * existing entries.
*/
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
- WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
+ WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
&devres));
}
-EXPORT_SYMBOL_GPL(devm_remove_action);
+EXPORT_SYMBOL_GPL(devm_release_action);
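Continuing the hypothetical foo_hw_disable() sketch above, the two unwind
variants differ in whether the action is executed:

        /* drop the action without running it; -ENOENT (no WARN) if absent */
        if (devm_remove_action_nowarn(dev, foo_hw_disable, hw) == -ENOENT)
                dev_dbg(dev, "disable action was never registered\n");

        /* or: run the action now and drop it from the devres list */
        devm_release_action(dev, foo_hw_disable, hw);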
/*
- * Managed kzalloc/kfree
+ * Managed kmalloc/kfree
*/
-static void devm_kzalloc_release(struct device *dev, void *res)
+static void devm_kmalloc_release(struct device *dev, void *res)
{
/* noop */
}
-static int devm_kzalloc_match(struct device *dev, void *res, void *data)
+static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
return res == data;
}
/**
- * devm_kzalloc - Resource-managed kzalloc
+ * devm_kmalloc - Resource-managed kmalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
*
- * Managed kzalloc. Memory allocated with this function is
+ * Managed kmalloc. Memory allocated with this function is
* automatically freed on driver detach. Like all other devres
- * resources, guaranteed alignment is unsigned long long.
+ * resources, guaranteed alignment is ARCH_DMA_MINALIGN.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
-void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
struct devres *dr;
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
+
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kzalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
if (unlikely(!dr))
return NULL;
+ /*
+	 * This is named devm_kzalloc_release for historical reasons:
+	 * the initial implementation only supported kzalloc, not kmalloc.
+ */
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
-EXPORT_SYMBOL_GPL(devm_kzalloc);
+EXPORT_SYMBOL_GPL(devm_kmalloc);
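A minimal probe-time sketch (hypothetical foo driver): the zeroing and
array-allocating variants, devm_kzalloc() and devm_kcalloc(), are thin
wrappers around devm_kmalloc():

        struct foo_priv {
                size_t nvecs;
                u32 *vecs;
        };

        static int foo_probe_alloc(struct device *dev)
        {
                struct foo_priv *priv;

                priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                priv->nvecs = 8;
                priv->vecs = devm_kcalloc(dev, priv->nvecs,
                                          sizeof(*priv->vecs), GFP_KERNEL);
                if (!priv->vecs)
                        return -ENOMEM; /* priv is freed on detach anyway */

                dev_set_drvdata(dev, priv);
                return 0;
        }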
+
+/**
+ * devm_krealloc - Resource-managed krealloc()
+ * @dev: Device to re-allocate memory for
+ * @ptr: Pointer to the memory chunk to re-allocate
+ * @new_size: New allocation size
+ * @gfp: Allocation gfp flags
+ *
+ * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
+ * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
+ * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
+ * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
+ * change the order in which the release callback for the re-alloc'ed devres
+ * will be called (except when falling back to devm_kmalloc() or when freeing
+ * resources when new_size is zero). The contents of the memory are preserved
+ * up to the lesser of new and old sizes.
+ */
+void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
+{
+ size_t total_new_size, total_old_size;
+ struct devres *old_dr, *new_dr;
+ unsigned long flags;
+
+ if (unlikely(!new_size)) {
+ devm_kfree(dev, ptr);
+ return ZERO_SIZE_PTR;
+ }
+
+ if (unlikely(ZERO_OR_NULL_PTR(ptr)))
+ return devm_kmalloc(dev, new_size, gfp);
+
+ if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
+ /*
+ * We cannot reliably realloc a const string returned by
+ * devm_kstrdup_const().
+ */
+ return NULL;
+
+ if (!check_dr_size(new_size, &total_new_size))
+ return NULL;
+
+ total_old_size = ksize(container_of(ptr, struct devres, data));
+ if (total_old_size == 0) {
+ WARN(1, "Pointer doesn't point to dynamically allocated memory.");
+ return NULL;
+ }
+
+ /*
+ * If new size is smaller or equal to the actual number of bytes
+ * allocated previously - just return the same pointer.
+ */
+ if (total_new_size <= total_old_size)
+ return ptr;
+
+ /*
+	 * Otherwise: allocate a new, larger chunk. We need to allocate before
+	 * taking the lock as the caller most probably uses GFP_KERNEL.
+	 * alloc_dr() will call check_dr_size() to reserve extra memory
+	 * for struct devres automatically, so the user-requested @new_size
+	 * can be passed to it directly, just as in devm_kmalloc().
+ */
+ new_dr = alloc_dr(devm_kmalloc_release,
+ new_size, gfp, dev_to_node(dev));
+ if (!new_dr)
+ return NULL;
+
+ /*
+ * The spinlock protects the linked list against concurrent
+ * modifications but not the resource itself.
+ */
+ spin_lock_irqsave(&dev->devres_lock, flags);
+
+ old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
+ if (!old_dr) {
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+ kfree(new_dr);
+ WARN(1, "Memory chunk not managed or managed by a different device.");
+ return NULL;
+ }
+
+ replace_dr(dev, &old_dr->node, &new_dr->node);
+
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ /*
+ * We can copy the memory contents after releasing the lock as we're
+ * no longer modifying the list links.
+ */
+ memcpy(new_dr->data, old_dr->data,
+ total_old_size - offsetof(struct devres, data));
+ /*
+ * Same for releasing the old devres - it's now been removed from the
+ * list. This is also the reason why we must not use devm_kfree() - the
+ * links are no longer valid.
+ */
+ kfree(old_dr);
+
+ return new_dr->data;
+}
+EXPORT_SYMBOL_GPL(devm_krealloc);
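A hedged sketch of growing a managed buffer; on failure the old chunk stays
valid, managed, and in its original position in the release order
(hypothetical helper, not part of this patch):

        static int foo_grow(struct device *dev, u32 **vecs, size_t n)
        {
                u32 *tmp;

                tmp = devm_krealloc(dev, *vecs, n * sizeof(*tmp), GFP_KERNEL);
                if (!tmp)
                        return -ENOMEM; /* *vecs is untouched */

                *vecs = tmp;
                return 0;
        }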
+
+/**
+ * devm_kstrdup - Allocate resource managed space and
+ * copy an existing string into it.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+ if (!s)
+ return NULL;
+
+ return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
+
+/**
+ * devm_kstrdup_const - resource managed conditional string duplication
+ * @dev: device for which to duplicate the string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Strings allocated by devm_kstrdup_const will be automatically freed when
+ * the associated device is detached.
+ *
+ * RETURNS:
+ * Source string if it is in the .rodata section, otherwise it falls back to
+ * devm_kstrdup().
+ */
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)s))
+ return s;
+
+ return devm_kstrdup(dev, s, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup_const);
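A short sketch (hypothetical helper, not part of this patch):

        static const char *foo_label(struct device *dev, const char *label)
        {
                /*
                 * A pointer into .rodata (e.g. a string literal) is
                 * returned as-is; anything else is duplicated via
                 * devm_kstrdup(). devm_kfree() knows to skip .rodata.
                 */
                return devm_kstrdup_const(dev, label, GFP_KERNEL);
        }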
+
+/**
+ * devm_kvasprintf - Allocate resource managed space and format a string
+ * into it.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * @fmt: The printf()-style format string
+ * @ap: Arguments for the format string
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+ va_list ap)
+{
+ unsigned int len;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ len = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ p = devm_kmalloc(dev, len+1, gfp);
+ if (!p)
+ return NULL;
+
+ vsnprintf(p, len+1, fmt, ap);
+
+ return p;
+}
+EXPORT_SYMBOL(devm_kvasprintf);
+
+/**
+ * devm_kasprintf - Allocate resource managed space and format a string
+ * into it.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * @fmt: The printf()-style format string
+ * @...: Arguments for the format string
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = devm_kvasprintf(dev, gfp, fmt, ap);
+ va_end(ap);
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(devm_kasprintf);
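Typical use is building per-instance names that live exactly as long as the
device binding; a hypothetical sketch:

        static char *foo_irq_name(struct device *dev, int index)
        {
                /* NULL on allocation failure; freed automatically on detach */
                return devm_kasprintf(dev, GFP_KERNEL, "%s-event%d",
                                      dev_name(dev), index);
        }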
/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with devm_kzalloc().
+ * Free memory allocated with devm_kmalloc().
*/
-void devm_kfree(struct device *dev, void *p)
+void devm_kfree(struct device *dev, const void *p)
{
int rc;
- rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
+ /*
+ * Special cases: pointer to a string in .rodata returned by
+ * devm_kstrdup_const() or NULL/ZERO ptr.
+ */
+ if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
+ return;
+
+ rc = devres_destroy(dev, devm_kmalloc_release,
+ devm_kmalloc_match, (void *)p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
+
+/**
+ * devm_kmemdup - Resource-managed kmemdup
+ * @dev: Device this memory belongs to
+ * @src: Memory region to duplicate
+ * @len: Memory region length
+ * @gfp: GFP mask to use
+ *
+ * Duplicate a region of memory using resource managed kmalloc()
+ */
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ void *p;
+
+ p = devm_kmalloc(dev, len, gfp);
+ if (p)
+ memcpy(p, src, len);
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup);
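A hedged sketch, assuming the caller holds a struct firmware obtained from
request_firmware(); the foo_* name is hypothetical:

        #include <linux/firmware.h>

        static void *foo_cache_fw(struct device *dev, const struct firmware *fw)
        {
                /* managed copy that outlives release_firmware(fw) */
                return devm_kmemdup(dev, fw->data, fw->size, GFP_KERNEL);
        }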
+
+/**
+ * devm_kmemdup_const - conditionally duplicate and manage a region of memory
+ *
+ * @dev: Device this memory belongs to
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Return: source address if it is in .rodata, otherwise the return value of
+ * devm_kmemdup() to which the function falls back.
+ */
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)src))
+ return src;
+
+ return devm_kmemdup(dev, src, len, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup_const);
+
+struct pages_devres {
+ unsigned long addr;
+ unsigned int order;
+};
+
+static int devm_pages_match(struct device *dev, void *res, void *p)
+{
+ struct pages_devres *devres = res;
+ struct pages_devres *target = p;
+
+ return devres->addr == target->addr;
+}
+
+static void devm_pages_release(struct device *dev, void *res)
+{
+ struct pages_devres *devres = res;
+
+ free_pages(devres->addr, devres->order);
+}
+
+/**
+ * devm_get_free_pages - Resource-managed __get_free_pages
+ * @dev: Device to allocate memory for
+ * @gfp_mask: Allocation gfp flags
+ * @order: Allocation size is (1 << order) pages
+ *
+ * Managed get_free_pages. Memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Address of allocated memory on success, 0 on failure.
+ */
+
+unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order)
+{
+ struct pages_devres *devres;
+ unsigned long addr;
+
+ addr = __get_free_pages(gfp_mask, order);
+
+ if (unlikely(!addr))
+ return 0;
+
+ devres = devres_alloc(devm_pages_release,
+ sizeof(struct pages_devres), GFP_KERNEL);
+ if (unlikely(!devres)) {
+ free_pages(addr, order);
+ return 0;
+ }
+
+ devres->addr = addr;
+ devres->order = order;
+
+ devres_add(dev, devres);
+ return addr;
+}
+EXPORT_SYMBOL_GPL(devm_get_free_pages);
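A minimal sketch: four contiguous, zeroed pages whose order is recorded in
the devres entry, so early teardown only needs the address (hypothetical
helper, not part of this patch):

        static unsigned long foo_alloc_scratch(struct device *dev)
        {
                unsigned long addr;

                addr = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 2);
                if (!addr)
                        return 0;

                /* optional early release: devm_free_pages(dev, addr); */
                return addr;
        }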
+
+/**
+ * devm_free_pages - Resource-managed free_pages
+ * @dev: Device this memory belongs to
+ * @addr: Memory to free
+ *
+ * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
+ * there is no need to supply the @order.
+ */
+void devm_free_pages(struct device *dev, unsigned long addr)
+{
+ struct pages_devres devres = { .addr = addr };
+
+ WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
+ &devres));
+}
+EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+ void __percpu *p;
+
+ p = *(void __percpu **)pdata;
+ free_percpu(p);
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align)
+{
+ void *p;
+ void __percpu *pcpu;
+
+ pcpu = __alloc_percpu(size, align);
+ if (!pcpu)
+ return NULL;
+
+ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+ if (!p) {
+ free_percpu(pcpu);
+ return NULL;
+ }
+
+ *(void __percpu **)p = pcpu;
+
+ devres_add(dev, p);
+
+ return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
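Drivers normally call this through the devm_alloc_percpu() macro, which plugs
in the type's size and natural alignment. A hypothetical sketch:

        struct foo_stats {
                u64 rx_packets;
        };

        static struct foo_stats __percpu *foo_init_stats(struct device *dev)
        {
                struct foo_stats __percpu *stats;

                stats = devm_alloc_percpu(dev, struct foo_stats);
                if (!stats)
                        return NULL;

                this_cpu_inc(stats->rx_packets);        /* this CPU's copy */
                return stats;
        }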