author     Christoph Hellwig <hch@lst.de>             2021-11-26 12:58:15 +0100
committer  Jens Axboe <axboe@kernel.dk>               2021-11-29 06:41:29 -0700
commit     d538ea4cb8e7241af8091eee30483fabf64444a5 (patch)
tree       aa6741c4badd6d776f0df6d5c618af9ac9985e99 /block/blk-ioc.c
parent     8ffc13680eac16a1eec86275b65fc6f0e27a30d8 (diff)
block: return the io_context from create_task_io_context
Grab a reference to the newly allocated or existing io_context in
create_task_io_context and return it. This simplifies the callers and
removes the need for double lookups.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211126115817.2087431-13-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
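In caller terms, the simplification looks like this (a sketch distilled from
the hunks below, with surrounding error handling abbreviated; not literal
code from the tree):

	/* Before: create_task_io_context() returned 0 or a negative errno,
	 * so a caller had to read task->io_context again afterwards. */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);
	ioc = current->io_context;	/* second lookup */
	if (!ioc)
		return NULL;

	/* After: the helper returns the io_context directly, with a
	 * reference already held, or NULL on failure. */
	ioc = create_task_io_context(current, GFP_ATOMIC, q->node);
	if (!ioc)
		return NULL;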
Diffstat (limited to 'block/blk-ioc.c')
 -rw-r--r--  block/blk-ioc.c | 66 ++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 30 insertions(+), 36 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f06d1040442c..5bfe810496fc 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -268,15 +268,14 @@ static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	return ioc;
 }
 
-static int create_task_io_context(struct task_struct *task, gfp_t gfp_flags,
-		int node)
+static struct io_context *create_task_io_context(struct task_struct *task,
+		gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
-	int ret;
 
 	ioc = alloc_io_context(gfp_flags, node);
 	if (!ioc)
-		return -ENOMEM;
+		return NULL;
 
 	/*
 	 * Try to install. ioc shouldn't be installed if someone else
@@ -292,11 +291,11 @@ static int create_task_io_context(struct task_struct *task, gfp_t gfp_flags,
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
 
-	ret = task->io_context ? 0 : -EBUSY;
-
+	ioc = task->io_context;
+	if (ioc)
+		get_io_context(ioc);
 	task_unlock(task);
-
-	return ret;
+	return ioc;
 }
 
 /**
@@ -319,18 +318,15 @@ struct io_context *get_task_io_context(struct task_struct *task,
 
 	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
 
-	do {
-		task_lock(task);
-		ioc = task->io_context;
-		if (likely(ioc)) {
-			get_io_context(ioc);
-			task_unlock(task);
-			return ioc;
-		}
+	task_lock(task);
+	ioc = task->io_context;
+	if (unlikely(!ioc)) {
 		task_unlock(task);
-	} while (!create_task_io_context(task, gfp_flags, node));
-
-	return NULL;
+		return create_task_io_context(task, gfp_flags, node);
+	}
+	get_io_context(ioc);
+	task_unlock(task);
+	return ioc;
 }
 
 int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
@@ -449,30 +445,28 @@ static struct io_cq *ioc_create_icq(struct io_context *ioc,
 struct io_cq *ioc_find_get_icq(struct request_queue *q)
 {
-	struct io_context *ioc;
-	struct io_cq *icq;
-
-	/* create task io_context, if we don't have one already */
-	if (unlikely(!current->io_context))
-		create_task_io_context(current, GFP_ATOMIC, q->node);
+	struct io_context *ioc = current->io_context;
+	struct io_cq *icq = NULL;
 
-	/*
-	 * May not have an IO context if it's a passthrough request
-	 */
-	ioc = current->io_context;
-	if (!ioc)
-		return NULL;
+	if (unlikely(!ioc)) {
+		ioc = create_task_io_context(current, GFP_ATOMIC, q->node);
+		if (!ioc)
+			return NULL;
+	} else {
+		get_io_context(ioc);
 
-	spin_lock_irq(&q->queue_lock);
-	icq = ioc_lookup_icq(ioc, q);
-	spin_unlock_irq(&q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
+		icq = ioc_lookup_icq(ioc, q);
+		spin_unlock_irq(&q->queue_lock);
+	}
 
 	if (!icq) {
 		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
-		if (!icq)
+		if (!icq) {
+			put_io_context(ioc);
 			return NULL;
+		}
 	}
 
-	get_io_context(icq->ioc);
 	return icq;
 }
 EXPORT_SYMBOL_GPL(ioc_find_get_icq);
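
The resulting reference-counting contract: any non-NULL return from
ioc_find_get_icq() now owns a reference on the associated io_context, taken
either inside create_task_io_context() or by the explicit get_io_context()
call, and the ioc_create_icq() failure path releases it with
put_io_context(). A minimal sketch of a hypothetical caller honoring that
contract (the surrounding context and error handling are illustrative, not
from this patch):

	struct io_cq *icq = ioc_find_get_icq(q);	/* takes an ioc reference */

	if (!icq)
		return -ENOMEM;			/* hypothetical error handling */
	/* ... use icq, e.g. associate it with a request ... */
	put_io_context(icq->ioc);		/* balance the reference when done */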