author		Ingo Molnar <mingo@kernel.org>		2021-02-12 12:50:09 +0100
committer	Ingo Molnar <mingo@kernel.org>		2021-02-12 12:56:55 +0100
commit		85e853c5ec8486117182baab10c98b321daa6d47 (patch)
tree		404551f51bc88ba9f4cc64431e2633493a971842 /mm/slab_common.c
parent		dcc0b49040c70ad827a7f3d58a21b01fdb14e749 (diff)
parent		0d2460ba61841e5c2e64e77f7a84d3fc69cfe899 (diff)
Merge branch 'for-mingo-rcu' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:

 - Documentation updates.

 - Miscellaneous fixes.

 - kfree_rcu() updates: Addition of mem_dump_obj() to provide allocator
   return addresses to more easily locate bugs. This has a couple of
   RCU-related commits, but is mostly MM. Was pulled in with akpm's
   agreement.

 - Per-callback-batch tracking of numbers of callbacks, which enables
   better debugging information and smarter reactions to large numbers
   of callbacks.

 - The first round of changes to allow CPUs to be runtime switched from
   and to callback-offloaded state.

 - CONFIG_PREEMPT_RT-related changes.

 - RCU CPU stall warning updates.

 - Addition of polling grace-period APIs for SRCU.

 - Torture-test and torture-test scripting updates, including a
   "torture everything" script that runs rcutorture, locktorture,
   scftorture, rcuscale, and refscale, and also does an allmodconfig
   build.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
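The mem_dump_obj() mentioned above is the type-agnostic entry point added by this series; the kmem_dump_obj() in the diff below handles the slab case. A minimal sketch of how a caller might use it (the report_suspect_ptr() helper is hypothetical, not part of this series):

	#include <linux/mm.h>
	#include <linux/printk.h>

	/* Hypothetical debug helper: dump provenance of a suspect pointer. */
	static void report_suspect_ptr(void *ptr)
	{
		/*
		 * mem_dump_obj() continues the current console line via
		 * pr_cont(), so the caller prints the preamble first.
		 */
		pr_info("suspect pointer %px:", ptr);
		mem_dump_obj(ptr);
	}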
Diffstat (limited to 'mm/slab_common.c')
 -rw-r--r--  mm/slab_common.c | 75
 1 file changed, 75 insertions(+), 0 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e981c80d216c..adbace4256ef 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -537,6 +537,81 @@ bool slab_is_available(void)
 	return slab_state >= UP;
 }
+/**
+ * kmem_valid_obj - does the pointer reference a valid slab object?
+ * @object: pointer to query.
+ *
+ * Return: %true if the pointer is to a not-yet-freed object from
+ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+ * is to an already-freed object, and %false otherwise.
+ */
+bool kmem_valid_obj(void *object)
+{
+	struct page *page;
+
+	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+		return false;
+	page = virt_to_head_page(object);
+	return PageSlab(page);
+}
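/*
 * A minimal usage sketch (hypothetical, not part of this commit): gate a
 * provenance dump on kmem_valid_obj() so that a non-slab pointer is never
 * handed to kmem_dump_obj(), which would splat. The check_slab_ptr() name
 * is illustrative only.
 */
static void check_slab_ptr(void *ptr)
{
	if (!kmem_valid_obj(ptr))
		return;
	pr_info("object %px:", ptr);	/* preamble; kmem_dump_obj() uses pr_cont() */
	kmem_dump_obj(ptr);
}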
+
+/**
+ * kmem_dump_obj - Print available slab provenance information
+ * @object: slab object for which to find provenance information.
+ *
+ * This function uses pr_cont(), so that the caller is expected to have
+ * printed out whatever preamble is appropriate. The provenance information
+ * depends on the type of object and on how much debugging is enabled.
+ * For a slab-cache object, the fact that it is a slab object is printed,
+ * and, if available, the slab name, return address, and stack trace from
+ * the allocation of that object.
+ *
+ * This function will splat if passed a pointer to a non-slab object.
+ * If you are not sure what type of object you have, you should instead
+ * use mem_dump_obj().
+ */
+void kmem_dump_obj(void *object)
+{
+	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
+	int i;
+	struct page *page;
+	unsigned long ptroffset;
+	struct kmem_obj_info kp = { };
+
+	if (WARN_ON_ONCE(!virt_addr_valid(object)))
+		return;
+	page = virt_to_head_page(object);
+	if (WARN_ON_ONCE(!PageSlab(page))) {
+		pr_cont(" non-slab memory.\n");
+		return;
+	}
+	kmem_obj_info(&kp, object, page);
+	if (kp.kp_slab_cache)
+		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+	else
+		pr_cont(" slab%s", cp);
+	if (kp.kp_objp)
+		pr_cont(" start %px", kp.kp_objp);
+	if (kp.kp_data_offset)
+		pr_cont(" data offset %lu", kp.kp_data_offset);
+	if (kp.kp_objp) {
+		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
+		pr_cont(" pointer offset %lu", ptroffset);
+	}
+	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
+		pr_cont(" size %u", kp.kp_slab_cache->usersize);
+	if (kp.kp_ret)
+		pr_cont(" allocated at %pS\n", kp.kp_ret);
+	else
+		pr_cont("\n");
+	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
+		if (!kp.kp_stack[i])
+			break;
+		pr_info(" %pS\n", kp.kp_stack[i]);
+	}
+}
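/*
 * Given the format strings above, a dump of a live kmalloc() object might
 * look like the following (address, cache name, and return address are
 * illustrative, and depend on how much slab debugging is enabled):
 *
 *   slab kmalloc-64 start ffff8800cf5d3500 pointer offset 16 size 64
 *   allocated at meminfo_proc_show+0x2f/0x60
 *
 * followed by one "%pS" pr_info() line per entry in kp.kp_stack[] when an
 * allocation stack trace has been recorded.
 */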
+
 #ifndef CONFIG_SLOB
 /* Create a cache during boot when no slab services are available yet */
 void __init create_boot_cache(struct kmem_cache *s, const char *name,