Diffstat (limited to 'mm/slub.c')
 -rw-r--r--  mm/slub.c  86
 1 file changed, 49 insertions, 37 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 2acce22590f8..f21b2f0c6f5a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2530,7 +2530,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
memset((char *)kasan_reset_tag(x) + inuse, 0,
s->size - inuse - rsize);
/*
- * Restore orig_size, otherwize kmalloc redzone overwritten
+ * Restore orig_size, otherwise a kmalloc redzone overwrite
* would be reported
*/
set_orig_size(s, x, orig_size);
@@ -4122,42 +4122,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
/* needed for kvfree_rcu_barrier() */
-void flush_all_rcu_sheaves(void)
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
{
struct slub_flush_work *sfw;
- struct kmem_cache *s;
unsigned int cpu;
- cpus_read_lock();
- mutex_lock(&slab_mutex);
+ mutex_lock(&flush_lock);
- list_for_each_entry(s, &slab_caches, list) {
- if (!s->cpu_sheaves)
- continue;
+ for_each_online_cpu(cpu) {
+ sfw = &per_cpu(slub_flush, cpu);
- mutex_lock(&flush_lock);
+ /*
+ * We don't check if an rcu_free sheaf exists - a racing
+ * __kfree_rcu_sheaf() might have just removed it.
+ * By executing flush_rcu_sheaf() on the cpu we make
+ * sure __kfree_rcu_sheaf() has finished its call_rcu().
+ */
- for_each_online_cpu(cpu) {
- sfw = &per_cpu(slub_flush, cpu);
+ INIT_WORK(&sfw->work, flush_rcu_sheaf);
+ sfw->s = s;
+ queue_work_on(cpu, flushwq, &sfw->work);
+ }
- /*
- * we don't check if rcu_free sheaf exists - racing
- * __kfree_rcu_sheaf() might have just removed it.
- * by executing flush_rcu_sheaf() on the cpu we make
- * sure the __kfree_rcu_sheaf() finished its call_rcu()
- */
+ for_each_online_cpu(cpu) {
+ sfw = &per_cpu(slub_flush, cpu);
+ flush_work(&sfw->work);
+ }
- INIT_WORK(&sfw->work, flush_rcu_sheaf);
- sfw->s = s;
- queue_work_on(cpu, flushwq, &sfw->work);
- }
+ mutex_unlock(&flush_lock);
+}
- for_each_online_cpu(cpu) {
- sfw = &per_cpu(slub_flush, cpu);
- flush_work(&sfw->work);
- }
+void flush_all_rcu_sheaves(void)
+{
+ struct kmem_cache *s;
+
+ cpus_read_lock();
+ mutex_lock(&slab_mutex);
- mutex_unlock(&flush_lock);
+ list_for_each_entry(s, &slab_caches, list) {
+ if (!s->cpu_sheaves)
+ continue;
+ flush_rcu_sheaves_on_cache(s);
}
mutex_unlock(&slab_mutex);
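
The interleaved +/- lines above are easier to follow as the code they produce. Below is a rough sketch of the two functions after this hunk, assembled only from the added and context lines; the trailing cpus_read_unlock() and closing brace of flush_all_rcu_sheaves() fall outside the visible context and are assumed here:

void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
{
        struct slub_flush_work *sfw;
        unsigned int cpu;

        mutex_lock(&flush_lock);

        /* queue a flush work item for this cache on every online cpu */
        for_each_online_cpu(cpu) {
                sfw = &per_cpu(slub_flush, cpu);
                INIT_WORK(&sfw->work, flush_rcu_sheaf);
                sfw->s = s;
                queue_work_on(cpu, flushwq, &sfw->work);
        }

        /* wait for all queued flushes before returning */
        for_each_online_cpu(cpu) {
                sfw = &per_cpu(slub_flush, cpu);
                flush_work(&sfw->work);
        }

        mutex_unlock(&flush_lock);
}

void flush_all_rcu_sheaves(void)
{
        struct kmem_cache *s;

        cpus_read_lock();
        mutex_lock(&slab_mutex);

        list_for_each_entry(s, &slab_caches, list) {
                if (!s->cpu_sheaves)
                        continue;
                flush_rcu_sheaves_on_cache(s);
        }

        mutex_unlock(&slab_mutex);
        cpus_read_unlock();     /* assumed; not visible in the hunk */
}
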
@@ -7110,7 +7115,7 @@ static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
* Uses kmalloc to get the memory but if the allocation fails then falls back
* to the vmalloc allocator. Use kvfree for freeing the memory.
*
- * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
+ * GFP_NOWAIT and GFP_ATOMIC are supported; the __GFP_NORETRY modifier is not.
* __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
* preferable to the vmalloc fallback, due to visible performance drawbacks.
*
@@ -7119,6 +7124,7 @@ static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
gfp_t flags, int node)
{
+ bool allow_block;
void *ret;
/*
@@ -7131,10 +7137,6 @@ void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
if (ret || size <= PAGE_SIZE)
return ret;
- /* non-sleeping allocations are not supported by vmalloc */
- if (!gfpflags_allow_blocking(flags))
- return NULL;
-
/* Don't even allow crazy sizes */
if (unlikely(size > INT_MAX)) {
WARN_ON_ONCE(!(flags & __GFP_NOWARN));
@@ -7142,13 +7144,23 @@ void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
}
/*
+ * For non-blocking requests VM_ALLOW_HUGE_VMAP is not used,
+ * because the huge-mapping path in vmalloc contains at
+ * least one might_sleep() call.
+ *
+ * TODO: Revise huge-mapping path to support non-blocking
+ * flags.
+ */
+ allow_block = gfpflags_allow_blocking(flags);
+
+ /*
* kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
* since the callers already cannot assume anything
* about the resulting pointer, and cannot play
* protection games.
*/
return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
- flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
+ flags, PAGE_KERNEL, allow_block ? VM_ALLOW_HUGE_VMAP : 0,
node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);
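
Taken together with the documentation hunk above, the change means a non-blocking kvmalloc() larger than PAGE_SIZE is no longer rejected before the vmalloc fallback; it only loses VM_ALLOW_HUGE_VMAP. A minimal, hypothetical usage sketch follows (the helper name is made up for illustration and is not part of this diff):

/* Hypothetical caller, for illustration only - not part of this patch. */
static void *alloc_table_atomic(size_t size)
{
        /*
         * With this change a GFP_NOWAIT request above PAGE_SIZE can still
         * fall back to vmalloc, just without huge mappings, so a blocking
         * context is no longer required. Failure must still be handled,
         * and the buffer freed with kvfree().
         */
        return kvmalloc(size, GFP_NOWAIT);
}
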
@@ -7899,11 +7911,11 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
* permitted to overwrite the first word of the object on
* kmem_cache_free.
*
- * This is the case if we do RCU, have a constructor or
- * destructor, are poisoning the objects, or are
- * redzoning an object smaller than sizeof(void *) or are
- * redzoning an object with slub_debug_orig_size() enabled,
- * in which case the right redzone may be extended.
+ * This is the case if we do RCU, have a constructor, are
+ * poisoning the objects, or are redzoning an object smaller
+ * than sizeof(void *) or are redzoning an object with
+ * slub_debug_orig_size() enabled, in which case the right
+ * redzone may be extended.
*
* The assumption that s->offset >= s->inuse means free
* pointer is outside of the object is used in the
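
For context, the cases this comment lists (after dropping the destructor case) translate roughly into a check like the one below. This is a paraphrase built from the comment itself, not the verbatim calculate_sizes() code:

        /* Sketch: when the free pointer must live outside the object */
        if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
            s->ctor ||
            ((flags & SLAB_RED_ZONE) &&
             (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
                s->offset = size;       /* free pointer placed after the object */
                size += sizeof(void *);
        }
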