author    Christoph Hellwig <hch@lst.de>    2018-11-14 17:02:05 +0100
committer Jens Axboe <axboe@kernel.dk>      2018-11-15 12:13:16 -0700
commit    079076b3416e78ba2bb3ce38e05e320c388c3120 (patch)
tree      9542a0f94fc2fcf98be8be557148904f8a55e58e /block
parent    8f4236d9008b0973a8281256ccfde6913cdec6cb (diff)
block: remove deadline __deadline manipulation helpers
With no users left since the removal of the legacy request interface, we can remove all the magic bit stealing now and make the deadline a normal field. But use WRITE_ONCE/READ_ONCE on the new deadline field, given that we don't seem to have any mechanism to guarantee a new value actually gets seen by other threads.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
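For context, a minimal, self-contained sketch of the access pattern the patch adopts (hypothetical struct and function names, not the actual struct request definition): the deadline becomes a plain unsigned long, the writer publishes it with WRITE_ONCE(), and the timeout scan reads it with READ_ONCE(), so the compiler cannot tear, fold, or cache the accesses; no stronger ordering is relied on here.

/*
 * Hypothetical sketch of the WRITE_ONCE/READ_ONCE deadline pattern
 * (simplified names, not the real struct request).
 */
#include <linux/compiler.h>	/* WRITE_ONCE(), READ_ONCE() */
#include <linux/jiffies.h>	/* jiffies, time_after_eq() */
#include <linux/types.h>	/* bool */

struct example_rq {
	unsigned long deadline;		/* plain field, no stolen low bit */
	unsigned int timeout;		/* timeout in jiffies */
};

/* Writer side, cf. blk_add_timer(): publish the new deadline. */
static void example_arm_timeout(struct example_rq *rq)
{
	WRITE_ONCE(rq->deadline, jiffies + rq->timeout);
}

/* Reader side, cf. blk_mq_req_expired(): check whether it has passed. */
static bool example_expired(struct example_rq *rq)
{
	unsigned long deadline = READ_ONCE(rq->deadline);

	return time_after_eq(jiffies, deadline);
}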
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c       |  4
-rw-r--r--  block/blk-timeout.c  |  8
-rw-r--r--  block/blk.h          | 35
3 files changed, 7 insertions(+), 40 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 411be60d0cb6..4c82b4b4fa3e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -325,7 +325,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->special = NULL;
/* tag was already set */
rq->extra_len = 0;
- rq->__deadline = 0;
+ WRITE_ONCE(rq->deadline, 0);
rq->timeout = 0;
@@ -839,7 +839,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
if (rq->rq_flags & RQF_TIMED_OUT)
return false;
- deadline = blk_rq_deadline(rq);
+ deadline = READ_ONCE(rq->deadline);
if (time_after_eq(jiffies, deadline))
return true;
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 006cff4390c0..3b0179fbdd6a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -84,7 +84,7 @@ void blk_abort_request(struct request *req)
* immediately and that scan sees the new timeout value.
* No need for fancy synchronizations.
*/
- blk_rq_set_deadline(req, jiffies);
+ WRITE_ONCE(req->deadline, jiffies);
kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
@@ -121,14 +121,16 @@ void blk_add_timer(struct request *req)
req->timeout = q->rq_timeout;
req->rq_flags &= ~RQF_TIMED_OUT;
- blk_rq_set_deadline(req, jiffies + req->timeout);
+
+ expiry = jiffies + req->timeout;
+ WRITE_ONCE(req->deadline, expiry);
/*
* If the timer isn't already pending or this timeout is earlier
* than an existing one, modify the timer. Round up to next nearest
* second.
*/
- expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
+ expiry = blk_rq_timeout(round_jiffies_up(expiry));
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) {
diff --git a/block/blk.h b/block/blk.h
index 41b64e6e101b..08a5845b03ba 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -239,26 +239,6 @@ void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);
/*
- * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds. Steal the bottom bit of the
- * __deadline field for this.
- */
-static inline int blk_mark_rq_complete(struct request *rq)
-{
- return test_and_set_bit(0, &rq->__deadline);
-}
-
-static inline void blk_clear_rq_complete(struct request *rq)
-{
- clear_bit(0, &rq->__deadline);
-}
-
-static inline bool blk_rq_is_complete(struct request *rq)
-{
- return test_bit(0, &rq->__deadline);
-}
-
-/*
* Internal elevator interface
*/
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
@@ -323,21 +303,6 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
}
/*
- * Steal a bit from this field for legacy IO path atomic IO marking. Note that
- * setting the deadline clears the bottom bit, potentially clearing the
- * completed bit. The user has to be OK with this (current ones are fine).
- */
-static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
-{
- rq->__deadline = time & ~0x1UL;
-}
-
-static inline unsigned long blk_rq_deadline(struct request *rq)
-{
- return rq->__deadline & ~0x1UL;
-}
-
-/*
* Internal io_context interface
*/
void get_io_context(struct io_context *ioc);