Diffstat (limited to 'io_uring/alloc_cache.h')
 -rw-r--r--  io_uring/alloc_cache.h  68
 1 file changed, 68 insertions(+), 0 deletions(-)
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
new file mode 100644
index 000000000000..d33ce159ef33
--- /dev/null
+++ b/io_uring/alloc_cache.h
@@ -0,0 +1,68 @@
+#ifndef IOU_ALLOC_CACHE_H
+#define IOU_ALLOC_CACHE_H
+
+#include <linux/io_uring_types.h>
+
+/*
+ * Don't allow the cache to grow beyond this size.
+ */
+#define IO_ALLOC_CACHE_MAX 128
+
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+ void (*free)(const void *));
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+ unsigned max_nr, unsigned int size,
+ unsigned int init_bytes);
+
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
+
+static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
+ void *entry)
+{
+ if (cache->nr_cached < cache->max_cached) {
+ if (!kasan_mempool_poison_object(entry))
+ return false;
+ cache->entries[cache->nr_cached++] = entry;
+ return true;
+ }
+ return false;
+}
+
+static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
+{
+ if (cache->nr_cached) {
+ void *entry = cache->entries[--cache->nr_cached];
+
+ /*
+ * If KASAN is enabled, always clear the initial bytes that
+ * must be zeroed post alloc, in case any of them overlap
+ * with KASAN storage.
+ */
+#if defined(CONFIG_KASAN)
+ kasan_mempool_unpoison_object(entry, cache->elem_size);
+ if (cache->init_clear)
+ memset(entry, 0, cache->init_clear);
+#endif
+ return entry;
+ }
+
+ return NULL;
+}
+
+static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
+{
+ void *obj;
+
+ obj = io_alloc_cache_get(cache);
+ if (obj)
+ return obj;
+ return io_cache_alloc_new(cache, gfp);
+}
+
+static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
+{
+ if (!io_alloc_cache_put(cache, obj))
+ kfree(obj);
+}
+
+#endif
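
Usage sketch (not part of the commit above): the call pattern a caller of this header would follow. "struct io_foo" and the foo_* helpers are hypothetical stand-ins; only the cache API itself comes from alloc_cache.h, and the failure convention of io_alloc_cache_init() (nonzero/true on failure) is assumed from its callers in io_uring.c rather than shown in this diff.

#include <linux/slab.h>
#include "alloc_cache.h"

/* Hypothetical per-request object recycled through the cache. */
struct io_foo {
	int	state;
	/* ... per-request data ... */
};

static int foo_cache_setup(struct io_alloc_cache *cache)
{
	/*
	 * Cache up to IO_ALLOC_CACHE_MAX entries of sizeof(struct io_foo).
	 * Passing sizeof(struct io_foo) as init_bytes asks the cache to
	 * re-zero recycled entries (done under CONFIG_KASAN in the header's
	 * get path). Treating a nonzero return as failure is assumed from
	 * the io_uring.c callers, not visible in this header.
	 */
	if (io_alloc_cache_init(cache, IO_ALLOC_CACHE_MAX,
				sizeof(struct io_foo),
				sizeof(struct io_foo)))
		return -ENOMEM;
	return 0;
}

static struct io_foo *foo_alloc(struct io_alloc_cache *cache)
{
	/* Pop a recycled entry if one exists, else allocate a fresh one. */
	return io_cache_alloc(cache, GFP_KERNEL);
}

static void foo_free(struct io_alloc_cache *cache, struct io_foo *foo)
{
	/* Stash for reuse; io_cache_free() kfree()s when it can't cache. */
	io_cache_free(cache, foo);
}

static void foo_cache_teardown(struct io_alloc_cache *cache)
{
	/* kfree matches io_cache_free()'s fallback allocator. */
	io_alloc_cache_free(cache, kfree);
}

Note the pairing implied by the header: io_cache_alloc()/io_cache_free() form the fast path, while io_alloc_cache_free() at teardown must be handed a destructor that matches whatever io_cache_alloc_new() used to allocate, which the kfree() fallback in io_cache_free() suggests is kmalloc-based.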