Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.h          2
-rw-r--r--  mm/slab_common.c   138
-rw-r--r--  mm/slub.c          20
3 files changed, 112 insertions(+), 48 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index dcdb56b8e7f5..a6051385186e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,6 +261,8 @@ struct kmem_cache {
unsigned int object_size; /* Object size without metadata */
struct reciprocal_value reciprocal_size;
unsigned int offset; /* Free pointer offset */
+ /* Specific free pointer requested (if not UINT_MAX) */
+ unsigned int rcu_freeptr_offset;
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
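The new field defaults to UINT_MAX, which serves as a "not requested" sentinel. A short sketch of the contract, as established by the slab_common.c and slub.c hunks below:

/*
 * Contract for the new field (consumers shown in the hunks below):
 *
 *   rcu_freeptr_offset == UINT_MAX  -> no custom offset requested;
 *          calculate_sizes() picks the placement itself (outside the
 *          object for SLAB_TYPESAFE_BY_RCU caches).
 *   rcu_freeptr_offset != UINT_MAX  -> calculate_sizes() uses it
 *          verbatim: s->offset = s->rcu_freeptr_offset.
 */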
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 40b582a014b8..95db3702f8d6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -202,10 +202,10 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
}
static struct kmem_cache *create_cache(const char *name,
- unsigned int object_size, unsigned int align,
- slab_flags_t flags, unsigned int useroffset,
- unsigned int usersize, void (*ctor)(void *),
- struct kmem_cache *root_cache)
+ unsigned int object_size, unsigned int freeptr_offset,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
{
struct kmem_cache *s;
int err;
@@ -213,6 +213,13 @@ static struct kmem_cache *create_cache(const char *name,
if (WARN_ON(useroffset + usersize > object_size))
useroffset = usersize = 0;
+ /* If a custom freelist pointer is requested make sure it's sane. */
+ err = -EINVAL;
+ if (freeptr_offset != UINT_MAX &&
+ (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
+ !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
+ goto out;
+
err = -ENOMEM;
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
if (!s)
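The check encodes the caller's obligations: the offset must fall inside the object, the cache must be SLAB_TYPESAFE_BY_RCU, and the offset must be freeptr_t-aligned. A hypothetical caller-side sketch (struct ex and its fields are invented for illustration; kmem_cache_create_rcu() is added further down in this patch and ORs in SLAB_TYPESAFE_BY_RCU itself):

struct ex {
	long a;
	freeptr_t free;		/* offset 8: in-object, freeptr_t-aligned */
};

/* Accepted: passes all three conditions above. */
kmem_cache_create_rcu("ex", sizeof(struct ex),
		      offsetof(struct ex, free), 0);

/* Fails with -EINVAL: freeptr_offset >= object_size. */
kmem_cache_create_rcu("ex", sizeof(struct ex), sizeof(struct ex), 0);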
@@ -220,13 +227,13 @@ static struct kmem_cache *create_cache(const char *name,
s->name = name;
s->size = s->object_size = object_size;
+ s->rcu_freeptr_offset = freeptr_offset;
s->align = align;
s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
s->useroffset = useroffset;
s->usersize = usersize;
#endif
-
err = __kmem_cache_create(s, flags);
if (err)
goto out_free_cache;
@@ -241,38 +248,10 @@ out:
return ERR_PTR(err);
}
-/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- * for copying to userspace
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
+static struct kmem_cache *
+do_kmem_cache_create_usercopy(const char *name,
+ unsigned int size, unsigned int freeptr_offset,
+ unsigned int align, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *))
{
@@ -332,9 +311,9 @@ kmem_cache_create_usercopy(const char *name,
goto out_unlock;
}
- s = create_cache(cache_name, size,
+ s = create_cache(cache_name, size, freeptr_offset,
calculate_alignment(flags, align, size),
- flags, useroffset, usersize, ctor, NULL);
+ flags, useroffset, usersize, ctor);
if (IS_ERR(s)) {
err = PTR_ERR(s);
kfree_const(cache_name);
@@ -356,6 +335,44 @@ out_unlock:
}
return s;
}
+
+/**
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
+ * for copying to userspace
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects.
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
+{
+ return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+ useroffset, usersize, ctor);
+}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
/**
@@ -387,11 +404,50 @@ struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
- return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
- ctor);
+ return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+ 0, 0, ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
+/**
+ * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @freeptr_offset: The offset within the object where the free pointer is placed
+ * @flags: SLAB flags
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ *
+ * See kmem_cache_create() for an explanation of possible @flags.
+ *
+ * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
+ * of the object. This might cause the object to grow in size. Callers
+ * that have a reason to avoid this can specify a custom free pointer
+ * offset in their struct where the free pointer will be placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields needed to guard against object
+ * recycling are invalidated (see SLAB_TYPESAFE_BY_RCU for details).
+ *
+ * Using zero as a value for @freeptr_offset is valid. To request no
+ * offset, UINT_MAX must be specified.
+ *
+ * Note that @ctor isn't supported with custom free pointers as a @ctor
+ * requires an external free pointer.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
+ unsigned int freeptr_offset,
+ slab_flags_t flags)
+{
+ return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
+ flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
+ NULL);
+}
+EXPORT_SYMBOL(kmem_cache_create_rcu);
+
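A usage sketch for the new API (all names below are hypothetical): the idea is to overlay the free pointer with state that is only meaningful while the object is live, typically via a union, so the object does not grow:

struct ex_node {
	refcount_t ref;
	union {
		struct list_head list;	/* only valid while allocated */
		freeptr_t freeptr;	/* reused by SLUB once freed */
	};
};

static struct kmem_cache *ex_node_cache;

static int __init ex_node_init(void)
{
	ex_node_cache = kmem_cache_create_rcu("ex_node",
					      sizeof(struct ex_node),
					      offsetof(struct ex_node, freeptr),
					      0);
	return ex_node_cache ? 0 : -ENOMEM;
}

Overlaying @list is only safe because RCU-protected lookups must already validate the object (e.g. via refcount_inc_not_zero()) before trusting any of its fields, per the SLAB_TYPESAFE_BY_RCU rules referenced in the kerneldoc above.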
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
/**
diff --git a/mm/slub.c b/mm/slub.c
index a77f354f8325..be36f1df809c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -466,12 +466,6 @@ static struct workqueue_struct *flushwq;
*******************************************************************/
/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
-/*
* Returns freelist pointer (ptr). With hardening, this is obfuscated
* with an XOR of the address where the pointer is held and a per-cache
* random number.
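The typedef is moved rather than dropped: callers now need freeptr_t to reserve a correctly typed and aligned slot in their structs. The header side of the move isn't visible here because this view is limited to mm/; the definition itself is unchanged:

/* As relocated to a shared header (include/linux/slab.h in the
 * upstream series): an encoded SLUB freelist pointer, not
 * dereferenceable when CONFIG_SLAB_FREELIST_HARDENED is enabled. */
typedef struct { unsigned long v; } freeptr_t;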
@@ -3925,6 +3919,9 @@ static void *__slab_alloc_node(struct kmem_cache *s,
/*
* If the object has been wiped upon free, make sure it's fully initialized by
* zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers specified via
+ * s->rcu_freeptr_offset.
*/
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
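The wipe operates on s->offset, so a custom in-object offset is covered without further changes. Roughly (condensed from slub.c of this era for orientation; helper names may differ between versions):

	/* On allocation: if init_on_free wiped the object at free time,
	 * re-zero the freelist word stored inside it so the object is
	 * returned fully zeroed. */
	if (unlikely(slab_want_init_on_free(s)) && obj &&
	    !freeptr_outside_object(s))
		memset((char *)kasan_reset_tag(obj) + s->offset,
		       0, sizeof(void *));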
@@ -5148,6 +5145,12 @@ static void set_cpu_partial(struct kmem_cache *s)
#endif
}
+/* Was a valid freeptr offset requested? */
+static inline bool has_freeptr_offset(const struct kmem_cache *s)
+{
+ return s->rcu_freeptr_offset != UINT_MAX;
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
@@ -5193,7 +5196,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
- if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+ if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+ (flags & SLAB_POISON) || s->ctor ||
((flags & SLAB_RED_ZONE) &&
(s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
/*
@@ -5214,6 +5218,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->offset = size;
size += sizeof(void *);
+ } else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
+ s->offset = s->rcu_freeptr_offset;
} else {
/*
* Store freelist pointer near middle of object to keep
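Putting the three branches together, the resulting free pointer placement (illustrative, 64-bit):

/*
 * RCU cache without a custom offset (likewise SLAB_POISON, a ctor,
 * or the red-zoning case):
 *	[ object ..................... ][ freeptr ]
 *	s->offset == end of object; size grows by sizeof(void *)
 *
 * RCU cache with a custom offset:
 *	[ .. ][ freeptr ][ rest of object ]
 *	s->offset == s->rcu_freeptr_offset; object size unchanged
 *
 * Everything else: freelist pointer near the middle of the object
 * (pre-existing behaviour, unchanged by this patch).
 */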