Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	37 ++++++++++++++++++++++++++++++++++++-
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bfe7c40eeee1..932d13ada36c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -163,6 +163,9 @@ int slab_unmergeable(struct kmem_cache *s)
 		return 1;
 #endif
 
+	if (s->cpu_sheaves)
+		return 1;
+
 	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
@@ -321,7 +324,7 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
 	    object_size - args->usersize < args->useroffset))
 		args->usersize = args->useroffset = 0;
 
-	if (!args->usersize)
+	if (!args->usersize && !args->sheaf_capacity)
 		s = __kmem_cache_alias(name, object_size, args->align, flags,
 				       args->ctor);
 	if (s)
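
Note: a cache that requests sheaves must not alias an existing cache, since sheaves are sized per cache. A minimal sketch of creating such a cache, assuming the sheaf_capacity field this series adds to struct kmem_cache_args (the object type, cache name, and the capacity value 32 are arbitrary examples, not recommendations):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical object type and cache, for illustration only. */
struct my_obj {
	long payload;
};

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	struct kmem_cache_args args = {
		.sheaf_capacity = 32,	/* assumed field from this series */
	};

	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
				     &args, 0);
	return my_cache ? 0 : -ENOMEM;
}

Because s->cpu_sheaves ends up set for such a cache, the slab_unmergeable() hunk above guarantees it is also never merged with another cache after creation.
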
@@ -507,6 +510,9 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		rcu_barrier();
 	}
 
+	/* Wait for deferred work from kmalloc/kfree_nolock() */
+	defer_free_barrier();
+
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
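
For context: kfree_nolock() (from the kmalloc_nolock() series this builds on) can hand the actual free off to deferred work when the calling context cannot take slab locks, so the destroy path must drain that queue first. A hedged sketch of the kind of caller being guarded against (helper name hypothetical):

#include <linux/slab.h>

/* Hypothetical any-context free; kfree_nolock() may defer the real free. */
static void drop_any_context(void *obj)
{
	kfree_nolock(obj);
}

The defer_free_barrier() call above then makes kmem_cache_destroy() wait until all such deferred frees have run before the cache itself is torn down.
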
@@ -1605,6 +1611,30 @@ static void kfree_rcu_work(struct work_struct *work)
 	kvfree_rcu_list(head);
 }
 
+static bool kfree_rcu_sheaf(void *obj)
+{
+	struct kmem_cache *s;
+	struct folio *folio;
+	struct slab *slab;
+
+	if (is_vmalloc_addr(obj))
+		return false;
+
+	folio = virt_to_folio(obj);
+	if (unlikely(!folio_test_slab(folio)))
+		return false;
+
+	slab = folio_slab(folio);
+	s = slab->slab_cache;
+	if (s->cpu_sheaves) {
+		if (likely(!IS_ENABLED(CONFIG_NUMA) ||
+			   slab_nid(slab) == numa_mem_id()))
+			return __kfree_rcu_sheaf(s, obj);
+	}
+
+	return false;
+}
+
 static bool
 need_offload_krc(struct kfree_rcu_cpu *krcp)
 {
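
The helper's contract: it returns true only when the object was queued onto a per-CPU rcu_free sheaf (in which case __kfree_rcu_sheaf() owns the grace period); false sends the caller down the existing batched path. The NUMA test keeps remote-node objects off the sheaf so their memory is returned to its home node. A standalone sketch of the same locality check (slab_nid() and folio_slab() are mm-internal helpers from mm/slab.h, so this would only build inside mm/; the function name is hypothetical):

/*
 * Sketch: without CONFIG_NUMA every object counts as local; otherwise
 * the slab's node must match the current CPU's memory node.
 */
static bool slab_obj_is_local(const void *obj)
{
	struct folio *folio = virt_to_folio(obj);

	if (!folio_test_slab(folio))
		return false;	/* vmalloc, large kmalloc or other non-slab memory */

	return !IS_ENABLED(CONFIG_NUMA) ||
	       slab_nid(folio_slab(folio)) == numa_mem_id();
}
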
@@ -1949,6 +1979,9 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 	if (!head)
 		might_sleep();
 
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kfree_rcu_sheaf(ptr))
+		return;
+
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
 		// Probable double kfree_rcu(), just leak.
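
Callers need no change: kfree_rcu() still expands to kvfree_call_rcu(), and the sheaf fast path is tried transparently before the object is queued into a batch (and skipped entirely on PREEMPT_RT). A typical, hypothetical user for reference:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical kfree_rcu() user; unchanged by the sheaf fast path. */
struct foo {
	struct rcu_head rcu;
	long payload;
};

static void put_foo(struct foo *f)
{
	kfree_rcu(f, rcu);	/* expands to kvfree_call_rcu(&f->rcu, f) */
}

If kfree_rcu_sheaf() declines the object (no sheaves on the cache, remote NUMA node, or non-slab memory), control simply falls through to the existing batched queueing below.
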
@@ -2023,6 +2056,8 @@ void kvfree_rcu_barrier(void)
 	bool queued;
 	int i, cpu;
 
+	flush_all_rcu_sheaves();
+
 	/*
	 * Firstly we detach objects and queue them over an RCU-batch
	 * for all CPUs. Finally queued works are flushed for each CPU.
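
Since objects may now be parked in per-CPU rcu_free sheaves rather than in the kfree_rcu batches, the barrier has to flush those sheaves first; flush_all_rcu_sheaves() is expected to return only once every queued sheaf has passed through its grace period. A hedged sketch of the teardown ordering this preserves (my_cache and the function are hypothetical; kmem_cache_destroy() also invokes kvfree_rcu_barrier() itself in current kernels):

/*
 * Once kvfree_rcu_barrier() returns, no kfree_rcu()'d object --
 * batched or sitting in an rcu_free sheaf -- still references my_cache.
 */
static void my_teardown(void)
{
	kvfree_rcu_barrier();
	kmem_cache_destroy(my_cache);
}
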