Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	81
1 file changed, 51 insertions, 30 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index af5aa65c7c18..50697a1d6621 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,13 +21,20 @@
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
-#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
-#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
-#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+/* DEBUG: Perform (expensive) checks on alloc/free */
+#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
+/* DEBUG: Red zone objs in a cache */
+#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
+/* DEBUG: Poison objects */
+#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
+/* Align objs on cache lines */
+#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
+/* Use GFP_DMA memory */
+#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
+/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
/*
* SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
@@ -65,44 +72,45 @@
*
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-#define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
-#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
+/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
+/* Spread some memory over cpuset */
+#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
+/* Trace allocations and frees */
+#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS 0x00400000UL
+# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
-# define SLAB_DEBUG_OBJECTS 0x00000000UL
+# define SLAB_DEBUG_OBJECTS 0
#endif
-#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
-/* Don't track use of uninitialized memory */
-#ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK 0x01000000UL
-#else
-# define SLAB_NOTRACK 0x00000000UL
-#endif
+/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
+# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
-# define SLAB_FAILSLAB 0x00000000UL
+# define SLAB_FAILSLAB 0
#endif
+/* Account to memcg */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
+# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
-# define SLAB_ACCOUNT 0x00000000UL
+# define SLAB_ACCOUNT 0
#endif
#ifdef CONFIG_KASAN
-#define SLAB_KASAN 0x08000000UL
+#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
-#define SLAB_KASAN 0x00000000UL
+#define SLAB_KASAN 0
#endif
/* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
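
As the full comment in slab.h explains, SLAB_TYPESAFE_BY_RCU defers freeing of the slab pages, not of individual objects: an object can be freed and immediately reused for a new identity while a lockless reader still holds a pointer to it. A minimal sketch of the re-validation a reader therefore needs, where lockless_lookup(), try_get_ref(), put_ref() and obj->key are hypothetical stand-ins for the caller's own machinery:

	rcu_read_lock();
again:
	obj = lockless_lookup(key);
	if (obj) {
		if (!try_get_ref(obj))		/* may fail if already freed */
			goto again;
		if (obj->key != key) {		/* reused for a different key */
			put_ref(obj);
			goto again;
		}
	}
	rcu_read_unlock();
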
@@ -128,7 +136,7 @@ void __init kmem_cache_init(void);
bool slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
- unsigned long,
+ slab_flags_t,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
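
A hedged sketch of a caller of the retyped prototype above; struct foo, foo_cache and foo_cache_init() are invented for illustration, and the SLAB_* flags come from the definitions earlier in this diff:

	#include <linux/slab.h>

	struct foo {
		unsigned long id;
		char name[32];
	};

	static struct kmem_cache *foo_cache;

	static int __init foo_cache_init(void)
	{
		/*
		 * The flags argument is now slab_flags_t instead of unsigned
		 * long, but callers that stick to the SLAB_* macros compile
		 * unchanged. SLAB_PANIC makes a failed create fatal, so no
		 * NULL check follows.
		 */
		foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
					      0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					      NULL);
		return 0;
	}
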
@@ -459,9 +467,6 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
- * %__GFP_COLD - Request cache-cold pages instead of
- * trying to return cache-warm pages.
- *
* %__GFP_HIGH - This allocation has high priority and may use emergency pools.
*
* %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
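
For illustration, OR'ing one of the remaining modifiers into an allocation might look like the following (__GFP_COLD is no longer available after this change; the buffer size is arbitrary):

	/* High-priority allocation that may dip into emergency pools. */
	void *buf = kmalloc(512, GFP_KERNEL | __GFP_HIGH);

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kfree(buf);
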
@@ -636,6 +641,22 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
+static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+ int node)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ if (__builtin_constant_p(n) && __builtin_constant_p(size))
+ return kmalloc_node(n * size, flags, node);
+ return __kmalloc_node(n * size, flags, node);
+}
+
+static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+ return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
+}
+
+
#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
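
The two helpers added above are the NUMA-aware counterparts of kmalloc_array() and kcalloc(): the size != 0 && n > SIZE_MAX / size test turns a multiplication overflow into a NULL return rather than a silently truncated allocation, and kcalloc_node() additionally ORs in __GFP_ZERO. A hypothetical use, allocating a zeroed table on a specific node (struct entry, nentries and nid are invented):

	struct entry *table;

	/* Overflow-checked, zeroed allocation placed on NUMA node 'nid'. */
	table = kcalloc_node(nentries, sizeof(*table), GFP_KERNEL, nid);
	if (!table)
		return -ENOMEM;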