Diffstat (limited to 'mm/slab_common.c')
| -rw-r--r-- | mm/slab_common.c | 54 |
1 file changed, 38 insertions, 16 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 84dfff4f7b1f..eed7ea556cb1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -259,7 +259,7 @@ out:
  * @object_size: The size of objects to be created in this cache.
  * @args: Additional arguments for the cache creation (see
  *        &struct kmem_cache_args).
- * @flags: See the desriptions of individual flags. The common ones are listed
+ * @flags: See the descriptions of individual flags. The common ones are listed
  *         in the description below.
  *
  * Not to be called directly, use the kmem_cache_create() wrapper with the same
@@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		return;
 
 	/* in-flight kfree_rcu()'s may include objects from our cache */
-	kvfree_rcu_barrier();
+	kvfree_rcu_barrier_on_cache(s);
 
 	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
 	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
@@ -2038,25 +2038,13 @@ unlock_return:
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
-/**
- * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
- *
- * Note that a single argument of kvfree_rcu() call has a slow path that
- * triggers synchronize_rcu() following by freeing a pointer. It is done
- * before the return from the function. Therefore for any single-argument
- * call that will result in a kfree() to a cache that is to be destroyed
- * during module exit, it is developer's responsibility to ensure that all
- * such calls have returned before the call to kmem_cache_destroy().
- */
-void kvfree_rcu_barrier(void)
+static inline void __kvfree_rcu_barrier(void)
 {
 	struct kfree_rcu_cpu_work *krwp;
 	struct kfree_rcu_cpu *krcp;
 	bool queued;
 	int i, cpu;
 
-	flush_all_rcu_sheaves();
-
 	/*
 	 * Firstly we detach objects and queue them over an RCU-batch
 	 * for all CPUs. Finally queued works are flushed for each CPU.
@@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
 		}
 	}
 }
+
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that a single argument of kvfree_rcu() call has a slow path that
+ * triggers synchronize_rcu() following by freeing a pointer. It is done
+ * before the return from the function. Therefore for any single-argument
+ * call that will result in a kfree() to a cache that is to be destroyed
+ * during module exit, it is developer's responsibility to ensure that all
+ * such calls have returned before the call to kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+	flush_all_rcu_sheaves();
+	__kvfree_rcu_barrier();
+}
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+/**
+ * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
+ * specific slab cache.
+ * @s: slab cache to wait for
+ *
+ * See the description of kvfree_rcu_barrier() for details.
+ */
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+	if (s->cpu_sheaves)
+		flush_rcu_sheaves_on_cache(s);
+	/*
+	 * TODO: Introduce a version of __kvfree_rcu_barrier() that works
+	 * on a specific slab cache.
+	 */
+	__kvfree_rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
 }
 
 #endif /* CONFIG_KVFREE_RCU_BATCHED */
-
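For context, a minimal sketch of the teardown pattern this patch targets: a module whose objects are freed via kfree_rcu() and whose cache is destroyed on module exit. The struct foo / foo_cache names below are illustrative, not from the patch; the point is that kmem_cache_destroy() now drains pending kvfree_rcu() work through kvfree_rcu_barrier_on_cache() rather than the fully global kvfree_rcu_barrier().

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* for the two-argument kfree_rcu() */
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	struct foo *f;

	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, 0, NULL);
	if (!foo_cache)
		return -ENOMEM;

	/*
	 * Queue an object for RCU-deferred freeing; kfree_rcu() frees it
	 * with kfree(), which resolves back to foo_cache. Such a free may
	 * still be in flight when the module exits.
	 */
	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
	if (f)
		kfree_rcu(f, rcu);
	return 0;
}

static void __exit foo_exit(void)
{
	/*
	 * kmem_cache_destroy() now calls kvfree_rcu_barrier_on_cache():
	 * it flushes only this cache's percpu sheaves, though the batched
	 * kvfree_rcu drain is still global (see the TODO in the patch).
	 * Single-argument kvfree_rcu(ptr) calls must have returned before
	 * this point, per the kvfree_rcu_barrier() kerneldoc.
	 */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

As the diff suggests, the immediate gain is on the sheaf side: destroying one cache needs only flush_rcu_sheaves_on_cache() for that cache instead of flush_all_rcu_sheaves() across every cache, while the batched __kvfree_rcu_barrier() drain remains global for now.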
