Diffstat (limited to 'io_uring/alloc_cache.h')
-rw-r--r-- | io_uring/alloc_cache.h | 79 |
1 file changed, 42 insertions, 37 deletions
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index bf2fb26a6539..0dd17d8ba93a 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -1,66 +1,71 @@
 #ifndef IOU_ALLOC_CACHE_H
 #define IOU_ALLOC_CACHE_H
 
+#include <linux/io_uring_types.h>
+
 /*
  * Don't allow the cache to grow beyond this size.
  */
-#define IO_ALLOC_CACHE_MAX	512
+#define IO_ALLOC_CACHE_MAX	128
+
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+			 void (*free)(const void *));
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+			 unsigned max_nr, unsigned int size,
+			 unsigned int init_bytes);
 
-struct io_cache_entry {
-	struct io_wq_work_node node;
-};
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
+
+static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
+{
+	if (IS_ENABLED(CONFIG_KASAN)) {
+		kfree(*iov);
+		*iov = NULL;
+		*nr = 0;
+	}
+}
 
 static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
-				      struct io_cache_entry *entry)
+				      void *entry)
 {
 	if (cache->nr_cached < cache->max_cached) {
-		cache->nr_cached++;
-		wq_stack_add_head(&entry->node, &cache->list);
-		kasan_mempool_poison_object(entry);
+		if (!kasan_mempool_poison_object(entry))
+			return false;
+		cache->entries[cache->nr_cached++] = entry;
 		return true;
 	}
 	return false;
 }
 
-static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
+static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
 {
-	return !cache->list.next;
-}
+	if (cache->nr_cached) {
+		void *entry = cache->entries[--cache->nr_cached];
 
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
-{
-	if (cache->list.next) {
-		struct io_cache_entry *entry;
-
-		entry = container_of(cache->list.next, struct io_cache_entry, node);
+		/*
+		 * If KASAN is enabled, always clear the initial bytes that
+		 * must be zeroed post alloc, in case any of them overlap
+		 * with KASAN storage.
+		 */
+#if defined(CONFIG_KASAN)
 		kasan_mempool_unpoison_object(entry, cache->elem_size);
-		cache->list.next = cache->list.next->next;
-		cache->nr_cached--;
+		if (cache->init_clear)
+			memset(entry, 0, cache->init_clear);
+#endif
 		return entry;
 	}
 
 	return NULL;
 }
 
-static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
-				       unsigned max_nr, size_t size)
+static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
 {
-	cache->list.next = NULL;
-	cache->nr_cached = 0;
-	cache->max_cached = max_nr;
-	cache->elem_size = size;
-}
+	void *obj;
 
-static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-				       void (*free)(struct io_cache_entry *))
-{
-	while (1) {
-		struct io_cache_entry *entry = io_alloc_cache_get(cache);
-
-		if (!entry)
-			break;
-		free(entry);
-	}
-	cache->nr_cached = 0;
+	obj = io_alloc_cache_get(cache);
+	if (obj)
+		return obj;
+	return io_cache_alloc_new(cache, gfp);
 }
+
 #endif
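The diff replaces the old intrusive-list cache (a struct io_cache_entry chained through an io_wq_work_node) with a flat array of void * entries, moves init/free out of line behind a typed free callback, and adds an init_clear size so that elements whose leading bytes must be zeroed after allocation stay correct when KASAN keeps metadata inside freed objects. Below is a minimal usage sketch of the resulting API. The object type, free callback, and sizing are hypothetical illustration only; io_alloc_cache_init() is assumed to follow its kernel convention of returning true on allocation failure.

	/* Hypothetical cache user: my_obj, my_obj_free() and the sizes
	 * are illustration only, not part of the kernel tree.
	 */
	#include <linux/slab.h>
	#include "alloc_cache.h"

	struct my_obj {
		int state;
	};

	static void my_obj_free(const void *entry)
	{
		kfree(entry);
	}

	static int my_obj_cache_demo(void)
	{
		struct io_alloc_cache cache;
		struct my_obj *obj;

		/* Assumed convention: returns true if allocating the
		 * entries array fails. init_bytes == 0 means nothing
		 * needs clearing when an element is recycled.
		 */
		if (io_alloc_cache_init(&cache, IO_ALLOC_CACHE_MAX,
					sizeof(struct my_obj), 0))
			return -ENOMEM;

		/* Pop a recycled element if one is cached, else fall
		 * back to a fresh allocation via io_cache_alloc_new().
		 */
		obj = io_cache_alloc(&cache, GFP_KERNEL);
		if (obj) {
			obj->state = 1;	/* ... use the object ... */

			/* Recycle on completion; if the cache is full
			 * (or KASAN refused to poison the object),
			 * ownership stays with the caller, who frees it.
			 */
			if (!io_alloc_cache_put(&cache, obj))
				kfree(obj);
		}

		/* Teardown frees every still-cached entry via the callback. */
		io_alloc_cache_free(&cache, my_obj_free);
		return 0;
	}

Note how the bool return of io_alloc_cache_put() now carries real information: besides the cache-full case, it reports a failed kasan_mempool_poison_object(), so callers can no longer fire-and-forget a put.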