author		Jens Axboe <axboe@kernel.dk>	2021-03-08 11:37:47 -0700
committer	Jens Axboe <axboe@kernel.dk>	2021-08-23 13:44:43 -0600
commit		be4d234d7aebbfe0c233bc20b9cdef7ab3408ff4 (patch)
tree		cc30999a6c468df13fdd8cdf1997789882e4ff9f /include
parent		6c7ef543df909dbdcd8cb24ef30627cba62a4e91 (diff)
bio: add allocation cache abstraction
Add a per-cpu bio_set cache for bio allocations, enabling us to quickly
recycle them instead of going through the slab allocator. This cache
isn't IRQ safe, and hence is only really suitable for polled IO.

Very simple - keeps a count of bio's in the cache, and maintains a max
of 512 with a slack of 64. If we get above max + slack, we drop slack
number of bio's.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
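The trim policy described above (a max of 512 cached bios with a slack of 64,
pruning the slack amount once the count exceeds max + slack) can be sketched
roughly as follows. This is an illustrative sketch assumed to sit in
block/bio.c next to the internal bio_free() helper; the identifiers
bio_alloc_cache, ALLOC_CACHE_MAX, ALLOC_CACHE_SLACK and the two helpers are
assumptions, not necessarily the exact code in the commit.

	/* illustrative names; the real implementation lives in block/bio.c */
	#define ALLOC_CACHE_MAX		512
	#define ALLOC_CACHE_SLACK	 64

	struct bio_alloc_cache {
		struct bio_list		free_list;	/* recycled bios */
		unsigned int		nr;		/* bios currently cached */
	};

	/* free @nr bios from the cache back to the slab/bio_set */
	static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
					  unsigned int nr)
	{
		struct bio *bio;

		while (nr-- && (bio = bio_list_pop(&cache->free_list)) != NULL) {
			cache->nr--;
			bio_free(bio);	/* internal block/bio.c free path */
		}
	}

	/*
	 * Return a bio to the per-cpu cache. Not IRQ safe, so only used
	 * from process context (polled IO put paths).
	 */
	static void bio_alloc_cache_put(struct bio_alloc_cache *cache,
					struct bio *bio)
	{
		bio_list_add_head(&cache->free_list, bio);
		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
	}

Keeping a head-insert free list and a plain counter keeps the hot path to a
couple of pointer updates, which is the point of bypassing the slab allocator.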
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bio.h		13
-rw-r--r--	include/linux/blk_types.h	 1
-rw-r--r--	include/linux/cpuhotplug.h	 1
3 files changed, 15 insertions, 0 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2203b686e1f0..89ad28213b1d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -401,6 +401,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
+ BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
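Opting a bio_set into the cache is done with the new bioset_init() flag. A
hypothetical user (the bio_set name and init function are illustrative) would
look roughly like:

	static struct bio_set example_bio_set;

	static int __init example_setup(void)
	{
		/* enable the per-cpu alloc cache for this bio_set */
		return bioset_init(&example_bio_set, BIO_POOL_SIZE, 0,
				   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
	}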
@@ -409,6 +410,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
struct bio_set *bs);
+struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
+ struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);
@@ -699,6 +702,11 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
+ /*
+ * per-cpu bio alloc cache
+ */
+ struct bio_alloc_cache __percpu *cache;
+
mempool_t bio_pool;
mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -715,6 +723,11 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
+
+ /*
+ * Hot un-plug notifier for the per-cpu cache, if used
+ */
+ struct hlist_node cpuhp_dead;
};
static inline bool bioset_initialized(struct bio_set *bs)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 290f9061b29a..f68d4e8c775e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -301,6 +301,7 @@ enum {
BIO_TRACKED, /* set if bio goes through the rq_qos path */
BIO_REMAPPED,
BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
+ BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */
BIO_FLAG_LAST
};
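The new bio flag marks bios that were handed out from (and may be returned
to) the per-cpu cache, so the release path can branch roughly as below. The
helper name is an assumption; only the flag itself comes from this patch.

	static void example_release_bio(struct bio *bio)
	{
		if (bio_flagged(bio, BIO_PERCPU_CACHE))
			bio_alloc_cache_put_bio(bio);	/* recycle, process context only */
		else
			bio_free(bio);			/* normal free path in block/bio.c */
	}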
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index f39b34b13871..fe72c8d6c980 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -46,6 +46,7 @@ enum cpuhp_state {
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
+ CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
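The new hotplug state exists so the cache can be drained when a CPU goes
offline. Wiring it up would look roughly like the sketch below; the teardown
callback body and the prune helper are assumptions, while the cpuhp_* calls
are the standard multi-instance hotplug API.

	static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
	{
		struct bio_set *bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);

		/* free everything cached on the CPU that just went away */
		bio_alloc_cache_prune(per_cpu_ptr(bs->cache, cpu), -1U);
		return 0;
	}

	static int __init init_bio(void)
	{
		cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
					bio_cpu_dead);
		return 0;
	}

	/* each cache-enabled bio_set then registers itself as an instance: */
	/*	cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); */

This is why struct bio_set grows the hlist_node cpuhp_dead member in the
bio.h hunk above: each bio_set with a cache becomes one instance of the
multi-state callback.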