 Documentation/block/biodoc.txt  |  4
 block/blk-core.c                | 60
 block/blk-flush.c               |  2
 block/blk-lib.c                 |  2
 block/blk-map.c                 |  2
 block/blk-mq.c                  | 28
 block/cfq-iosched.c             | 66
 block/elevator.c                |  4
 drivers/md/dm-crypt.c           |  2
 drivers/scsi/sd.c               |  3
 fs/btrfs/inode.c                |  5
 fs/buffer.c                     |  2
 fs/f2fs/f2fs.h                  |  2
 fs/gfs2/lops.c                  |  2
 include/linux/blk-cgroup.h      | 11
 include/linux/blk_types.h       | 83
 include/linux/blkdev.h          | 26
 include/linux/blktrace_api.h    |  2
 include/linux/dm-io.h           |  2
 include/linux/elevator.h        |  4
 include/trace/events/bcache.h   | 12
 include/trace/events/block.h    | 31
 kernel/trace/blktrace.c         | 14
 23 files changed, 148 insertions(+), 221 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6acea160298c..01ddeaf64b0f 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -553,8 +553,8 @@ struct request {
struct request_list *rl;
}
-See the rq_flag_bits definitions for an explanation of the various flags
-available. Some bits are used by the block layer or i/o scheduler.
+See the req_ops and req_flag_bits definitions for an explanation of the various
+flags available. Some bits are used by the block layer or i/o scheduler.
The behaviour of the various sector counts is almost the same as before,
except that since we have multi-segment bios, current_nr_sectors refers
diff --git a/block/blk-core.c b/block/blk-core.c
index fd416651a676..0bfaa54d3e9f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1056,8 +1056,7 @@ static struct io_context *rq_ioc(struct bio *bio)
/**
* __get_request - get a free request
* @rl: request list to allocate from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1068,23 +1067,22 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *__get_request(struct request_list *rl, int op,
- int op_flags, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, unsigned int op,
+ struct bio *bio, gfp_t gfp_mask)
{
struct request_queue *q = rl->q;
struct request *rq;
struct elevator_type *et = q->elevator->type;
struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
- const bool is_sync = rw_is_sync(op, op_flags) != 0;
+ const bool is_sync = op_is_sync(op);
int may_queue;
req_flags_t rq_flags = RQF_ALLOCED;
if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV);
- may_queue = elv_may_queue(q, op, op_flags);
+ may_queue = elv_may_queue(q, op);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
@@ -1154,7 +1152,7 @@ static struct request *__get_request(struct request_list *rl, int op,
blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
- req_set_op_attrs(rq, op, op_flags);
+ rq->cmd_flags = op;
rq->rq_flags = rq_flags;
/* init elvpriv */
@@ -1232,8 +1230,7 @@ rq_starved:
/**
* get_request - get a free request
* @q: request_queue to allocate request from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1244,18 +1241,17 @@ rq_starved:
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request(struct request_queue *q, int op,
- int op_flags, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, unsigned int op,
+ struct bio *bio, gfp_t gfp_mask)
{
- const bool is_sync = rw_is_sync(op, op_flags) != 0;
+ const bool is_sync = op_is_sync(op);
DEFINE_WAIT(wait);
struct request_list *rl;
struct request *rq;
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
- rq = __get_request(rl, op, op_flags, bio, gfp_mask);
+ rq = __get_request(rl, op, bio, gfp_mask);
if (!IS_ERR(rq))
return rq;
@@ -1297,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
- rq = get_request(q, rw, 0, NULL, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
return rq;
@@ -1446,7 +1442,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
*/
if (rq_flags & RQF_ALLOCED) {
struct request_list *rl = blk_rq_rl(req);
- bool sync = rw_is_sync(req_op(req), req->cmd_flags);
+ bool sync = op_is_sync(req->cmd_flags);
BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req));
@@ -1652,8 +1648,6 @@ out:
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
-
- req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
@@ -1665,9 +1659,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
- const bool sync = !!(bio->bi_opf & REQ_SYNC);
struct blk_plug *plug;
- int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
+ int el_ret, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
@@ -1723,23 +1716,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
get_rq:
/*
- * This sync check and mask will be re-done in init_request_from_bio(),
- * but we need to set it earlier to expose the sync flag to the
- * rq allocator and io schedulers.
- */
- if (sync)
- rw_flags |= REQ_SYNC;
-
- /*
- * Add in META/PRIO flags, if set, before we get to the IO scheduler
- */
- rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
-
- /*
* Grab a free request. This might sleep but cannot fail.
* Returns with the queue unlocked.
*/
- req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
+ req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
if (IS_ERR(req)) {
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
@@ -2946,8 +2926,6 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- req_set_op(rq, bio_op(bio));
-
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -3031,8 +3009,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
- req_set_op_attrs(dst, req_op(src),
- (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
+ dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
@@ -3537,8 +3514,11 @@ EXPORT_SYMBOL(blk_set_runtime_active);
int __init blk_dev_init(void)
{
- BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+ BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+ BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
FIELD_SIZEOF(struct request, cmd_flags));
+ BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
+ FIELD_SIZEOF(struct bio, bi_opf));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
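Every blk-core.c hunk above reduces to the same pattern: the combined
op-and-flags word travels from bio->bi_opf into rq->cmd_flags unchanged,
and helpers take one unsigned int instead of an (op, op_flags) pair. A
minimal sketch of the resulting idiom, assuming the post-patch headers
(example_prep_rq is a hypothetical name, not a kernel function):

        /* Sketch: seed a request from a bio under the merged encoding. */
        static void example_prep_rq(struct request *rq, struct bio *bio)
        {
                /* one store carries both the REQ_OP_* value and the flags */
                rq->cmd_flags = bio->bi_opf;

                /* the op now lives in the low REQ_OP_BITS bits */
                WARN_ON_ONCE(req_op(rq) >= REQ_OP_LAST);
        }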
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3990b9cfbda5..95f1d4d357df 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
}
flush_rq->cmd_type = REQ_TYPE_FS;
- req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH);
+ flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH;
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 46fe9248410d..18abda862915 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,7 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
unsigned int granularity;
- enum req_op op;
+ unsigned int op;
int alignment;
sector_t bs_mask;
diff --git a/block/blk-map.c b/block/blk-map.c
index 2c5ae5fef473..0173a72a8aa9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,6 +16,8 @@
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
if (!rq->bio) {
+ rq->cmd_flags &= REQ_OP_MASK;
+ rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
blk_rq_bio_prep(rq->q, rq, bio);
} else {
if (!ll_back_merge_fn(rq->q, rq, bio))
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b49c6658eb05..2da1a0ee3318 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -139,14 +139,13 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
EXPORT_SYMBOL(blk_mq_can_queue);
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
- struct request *rq, int op,
- unsigned int op_flags)
+ struct request *rq, unsigned int op)
{
INIT_LIST_HEAD(&rq->queuelist);
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = q;
rq->mq_ctx = ctx;
- req_set_op_attrs(rq, op, op_flags);
+ rq->cmd_flags = op;
if (blk_queue_io_stat(q))
rq->rq_flags |= RQF_IO_STAT;
/* do not touch atomic flags, it needs atomic ops against the timer */
@@ -183,11 +182,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->end_io_data = NULL;
rq->next_rq = NULL;
- ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
+ ctx->rq_dispatched[op_is_sync(op)]++;
}
static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
{
struct request *rq;
unsigned int tag;
@@ -202,7 +201,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
}
rq->tag = tag;
- blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
+ blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
return rq;
}
@@ -225,7 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
blk_mq_put_ctx(ctx);
if (!rq) {
@@ -277,7 +276,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
if (!rq) {
ret = -EWOULDBLOCK;
goto out_queue_exit;
@@ -1196,19 +1195,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct request *rq;
- int op = bio_data_dir(bio);
- int op_flags = 0;
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);
- if (rw_is_sync(bio_op(bio), bio->bi_opf))
- op_flags |= REQ_SYNC;
-
- trace_block_getrq(q, bio, op);
+ trace_block_getrq(q, bio, bio->bi_opf);
blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
- rq = __blk_mq_alloc_request(data, op, op_flags);
+ rq = __blk_mq_alloc_request(data, bio->bi_opf);
data->hctx->queued++;
return rq;
@@ -1256,7 +1250,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
*/
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_mq_alloc_data data;
struct request *rq;
@@ -1350,7 +1344,7 @@ done:
*/
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_plug *plug;
unsigned int request_count = 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5e24d880306c..c96186adaa66 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -667,10 +667,10 @@ static inline void cfqg_put(struct cfq_group *cfqg)
} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, int op,
- int op_flags)
+ struct cfq_group *curr_cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
+ blkg_rwstat_add(&cfqg->stats.queued, op, 1);
cfqg_stats_end_empty_time(&cfqg->stats);
cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}
@@ -684,30 +684,29 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
#endif
}
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
- int op_flags)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
+ blkg_rwstat_add(&cfqg->stats.queued, op, -1);
}
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
- int op_flags)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
+ blkg_rwstat_add(&cfqg->stats.merged, op, 1);
}
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- uint64_t start_time, uint64_t io_start_time, int op,
- int op_flags)
+ uint64_t start_time, uint64_t io_start_time,
+ unsigned int op)
{
struct cfqg_stats *stats = &cfqg->stats;
unsigned long long now = sched_clock();
if (time_after64(now, io_start_time))
- blkg_rwstat_add(&stats->service_time, op, op_flags,
- now - io_start_time);
+ blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
if (time_after64(io_start_time, start_time))
- blkg_rwstat_add(&stats->wait_time, op, op_flags,
+ blkg_rwstat_add(&stats->wait_time, op,
io_start_time - start_time);
}
@@ -786,16 +785,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, int op, int op_flags) { }
+ struct cfq_group *curr_cfqg, unsigned int op) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
uint64_t time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
- int op_flags) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
- int op_flags) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+ unsigned int op) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+ unsigned int op) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- uint64_t start_time, uint64_t io_start_time, int op,
- int op_flags) { }
+ uint64_t start_time, uint64_t io_start_time,
+ unsigned int op) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -2474,10 +2473,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
- req_op(rq), rq->cmd_flags);
+ rq->cmd_flags);
}
static struct request *
@@ -2530,7 +2529,7 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
@@ -2565,7 +2564,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
}
static void
@@ -2588,7 +2587,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
- cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
+ cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
cfqq = RQ_CFQQ(next);
/*
@@ -4142,7 +4141,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
- cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
rq->cmd_flags);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -4240,8 +4239,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
- rq_io_start_time_ns(rq), req_op(rq),
- rq->cmd_flags);
+ rq_io_start_time_ns(rq), rq->cmd_flags);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -4319,14 +4317,14 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_schedule_dispatch(cfqd);
}
-static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
+static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
{
/*
* If REQ_PRIO is set, boost class and prio level, if it's below
* BE/NORM. If prio is not set, restore the potentially boosted
* class/prio level.
*/
- if (!(op_flags & REQ_PRIO)) {
+ if (!(op & REQ_PRIO)) {
cfqq->ioprio_class = cfqq->org_ioprio_class;
cfqq->ioprio = cfqq->org_ioprio;
} else {
@@ -4347,7 +4345,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
+static int cfq_may_queue(struct request_queue *q, unsigned int op)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -4364,10 +4362,10 @@ static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
if (!cic)
return ELV_MQUEUE_MAY;
- cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
+ cfqq = cic_to_cfqq(cic, op_is_sync(op));
if (cfqq) {
cfq_init_prio_data(cfqq, cic);
- cfqq_boost_on_prio(cfqq, op_flags);
+ cfqq_boost_on_prio(cfqq, op);
return __cfq_may_queue(cfqq);
}
diff --git a/block/elevator.c b/block/elevator.c
index ac80f89a0842..a18a5db274e4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -714,12 +714,12 @@ void elv_put_request(struct request_queue *q, struct request *rq)
e->type->ops.elevator_put_req_fn(rq);
}
-int elv_may_queue(struct request_queue *q, int op, int op_flags)
+int elv_may_queue(struct request_queue *q, unsigned int op)
{
struct elevator_queue *e = q->elevator;
if (e->type->ops.elevator_may_queue_fn)
- return e->type->ops.elevator_may_queue_fn(q, op, op_flags);
+ return e->type->ops.elevator_may_queue_fn(q, op);
return ELV_MQUEUE_MAY;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a2768835d394..68a9eb4f3f36 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1135,7 +1135,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+ clone->bi_opf = io->base_bio->bi_opf;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cef1f78031d4..65738b0aad36 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1031,8 +1031,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
} else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
- req_op(rq), (unsigned long long) rq->cmd_flags);
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
goto out;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2b790bda7998..9a377079af26 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8427,7 +8427,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
if (!bio)
return -ENOMEM;
- bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio));
+ bio->bi_opf = orig_bio->bi_opf;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
@@ -8465,8 +8465,7 @@ next_block:
start_sector, GFP_NOFS);
if (!bio)
goto out_err;
- bio_set_op_attrs(bio, bio_op(orig_bio),
- bio_flags(orig_bio));
+ bio->bi_opf = orig_bio->bi_opf;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
diff --git a/fs/buffer.c b/fs/buffer.c
index b205a629001d..a29335867e30 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3118,7 +3118,7 @@ EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
* @op: whether to %READ or %WRITE
- * @op_flags: rq_flag_bits
+ * @op_flags: req_flag_bits
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9e8de18a168a..2cf4f7f09e32 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -688,7 +688,7 @@ struct f2fs_io_info {
struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
int op; /* contains REQ_OP_ */
- int op_flags; /* rq_flag_bits */
+ int op_flags; /* req_flag_bits */
block_t new_blkaddr; /* new block address to be written */
block_t old_blkaddr; /* old block address before Cow */
struct page *page; /* page to be written */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 49d5a1b61b06..b1f9144b42c7 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -231,7 +231,7 @@ static void gfs2_end_log_write(struct bio *bio)
* gfs2_log_flush_bio - Submit any pending log bio
* @sdp: The superblock
* @op: REQ_OP
- * @op_flags: rq_flag_bits
+ * @op_flags: req_flag_bits
*
* Submit any pending part-built or full bio to the block device. If
* there is no pending bio, then this is a no-op.
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 3bf5d33800ab..ddaf28d0988f 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
/**
* blkg_rwstat_add - add a value to a blkg_rwstat
* @rwstat: target blkg_rwstat
- * @op: REQ_OP
- * @op_flags: rq_flag_bits
+ * @op: REQ_OP and flags
* @val: value to add
*
* Add @val to @rwstat. The counters are chosen according to @op. The
* caller is responsible for synchronizing calls to this function.
*/
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- int op, int op_flags, uint64_t val)
+ unsigned int op, uint64_t val)
{
struct percpu_counter *cnt;
@@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
- if (op_flags & REQ_SYNC)
+ if (op & REQ_SYNC)
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
+ blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
rcu_read_unlock();
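With the merged word, blkg_rwstat_add() selects the read/write and
sync/async buckets from a single argument, as the REQ_SYNC test above
shows. A hedged usage sketch, assuming the post-patch blk-cgroup.h and
a caller-owned struct blkg_rwstat named rwstat:

        /* Sketch: account one 4KiB synchronous write. */
        blkg_rwstat_add(&rwstat, REQ_OP_WRITE | REQ_SYNC, 4096);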
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ec69a8fe3b29..dca972d67548 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -88,24 +88,6 @@ struct bio {
struct bio_vec bi_inline_vecs[0];
};
-#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
-#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
-#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
-
-#define bio_set_op_attrs(bio, op, op_flags) do { \
- if (__builtin_constant_p(op)) \
- BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \
- else \
- WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \
- if (__builtin_constant_p(op_flags)) \
- BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
- else \
- WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
- (bio)->bi_opf = bio_flags(bio); \
- (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \
- (bio)->bi_opf |= (op_flags); \
-} while (0)
-
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
/*
@@ -147,26 +129,40 @@ struct bio {
#endif /* CONFIG_BLOCK */
/*
- * Request flags. For use in the cmd_flags field of struct request, and in
- * bi_opf of struct bio. Note that some flags are only valid in either one.
+ * Operations and flags common to the bio and request structures.
+ * We use 8 bits for encoding the operation, and the remaining 24 for flags.
*/
-enum rq_flag_bits {
- /* common flags */
- __REQ_FAILFAST_DEV, /* no driver retries of device errors */
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
+
+enum req_opf {
+ REQ_OP_READ,
+ REQ_OP_WRITE,
+ REQ_OP_DISCARD, /* request to discard sectors */
+ REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
+ REQ_OP_WRITE_SAME, /* write same block many times */
+ REQ_OP_FLUSH, /* request for cache flush */
+ REQ_OP_ZONE_REPORT, /* Get zone information */
+ REQ_OP_ZONE_RESET, /* Reset a zone write pointer */
+
+ REQ_OP_LAST,
+};
+
+enum req_flag_bits {
+ __REQ_FAILFAST_DEV = /* no driver retries of device errors */
+ REQ_OP_BITS,
__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
__REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
-
__REQ_SYNC, /* request is sync (sync write or read) */
__REQ_META, /* metadata io request */
__REQ_PRIO, /* boost priority in cfq */
-
__REQ_NOMERGE, /* don't touch this for merging */
__REQ_NOIDLE, /* don't anticipate more IO after this one */
__REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
__REQ_PREFLUSH, /* request for cache flush */
__REQ_RAHEAD, /* read ahead, can fail anytime */
-
__REQ_NR_BITS, /* stops here */
};
@@ -176,37 +172,32 @@ enum rq_flag_bits {
#define REQ_SYNC (1ULL << __REQ_SYNC)
#define REQ_META (1ULL << __REQ_META)
#define REQ_PRIO (1ULL << __REQ_PRIO)
+#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
-#define REQ_COMMON_MASK \
- (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
- REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_RAHEAD)
-#define REQ_CLONE_MASK REQ_COMMON_MASK
-/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
+#define bio_op(bio) \
+ ((bio)->bi_opf & REQ_OP_MASK)
+#define req_op(req) \
+ ((req)->cmd_flags & REQ_OP_MASK)
-enum req_op {
- REQ_OP_READ,
- REQ_OP_WRITE,
- REQ_OP_DISCARD, /* request to discard sectors */
- REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
- REQ_OP_WRITE_SAME, /* write same block many times */
- REQ_OP_FLUSH, /* request for cache flush */
- REQ_OP_ZONE_REPORT, /* Get zone information */
- REQ_OP_ZONE_RESET, /* Reset a zone write pointer */
-};
+/* obsolete, don't use in new code */
+#define bio_set_op_attrs(bio, op, op_flags) \
+ ((bio)->bi_opf |= (op | op_flags))
-#define REQ_OP_BITS 3
+static inline bool op_is_sync(unsigned int op)
+{
+ return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
+}
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
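The blk_types.h rework above is the heart of the patch: the low
REQ_OP_BITS bits of both bi_opf and cmd_flags carry a REQ_OP_* value,
the 24 bits above them carry REQ_* flags, and the same helpers decode
either field. A short sketch of the encoding, assuming only the
definitions shown above:

        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

        BUG_ON((op & REQ_OP_MASK) != REQ_OP_WRITE);     /* decode the op */
        BUG_ON(!(op & REQ_FUA));                        /* test a flag bit */
        BUG_ON(!op_is_sync(op));                        /* sync write is sync */
        BUG_ON(!op_is_sync(REQ_OP_READ));               /* any read is sync */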
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4415feac679..8396da2bb698 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -142,7 +142,7 @@ struct request {
int cpu;
unsigned cmd_type;
- u64 cmd_flags;
+ unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
unsigned long atomic_flags;
@@ -244,20 +244,6 @@ struct request {
struct request *next_rq;
};
-#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
-#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT)
-
-#define req_set_op(req, op) do { \
- WARN_ON(op >= (1 << REQ_OP_BITS)); \
- (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \
- (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \
-} while (0)
-
-#define req_set_op_attrs(req, op, flags) do { \
- req_set_op(req, op); \
- (req)->cmd_flags |= flags; \
-} while (0)
-
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
@@ -741,17 +727,9 @@ static inline unsigned int blk_queue_zone_size(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
-/*
- * We regard a request as sync, if either a read or a sync write
- */
-static inline bool rw_is_sync(int op, unsigned int rw_flags)
-{
- return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
-}
-
static inline bool rq_is_sync(struct request *rq)
{
- return rw_is_sync(req_op(rq), rq->cmd_flags);
+ return op_is_sync(rq->cmd_flags);
}
static inline bool blk_rl_full(struct request_list *rl, bool sync)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index cceb72f9e29f..e417f080219a 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
}
extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes);
+extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index b91b023deffb..a52c6580cc9a 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -58,7 +58,7 @@ struct dm_io_notify {
struct dm_io_client;
struct dm_io_request {
int bi_op; /* REQ_OP */
- int bi_op_flags; /* rq_flag_bits */
+ int bi_op_flags; /* req_flag_bits */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e7f358d2e5fc..f219c9aed360 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, int, int);
+typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
typedef void (elevator_init_icq_fn) (struct io_cq *);
typedef void (elevator_exit_icq_fn) (struct io_cq *);
@@ -139,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, int, int);
+extern int elv_may_queue(struct request_queue *, unsigned int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,
struct bio *bio, gfp_t gfp_mask);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index d336b890e31f..df3e9ae5ad8d 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -27,8 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request,
__entry->sector = bio->bi_iter.bi_sector;
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -102,8 +101,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u",
@@ -138,8 +136,7 @@ TRACE_EVENT(bcache_read,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
__entry->cache_hit = hit;
__entry->bypass = bypass;
),
@@ -170,8 +167,7 @@ TRACE_EVENT(bcache_write,
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
__entry->writeback = writeback;
__entry->bypass = bypass;
),
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 8f3a163b8166..3e02e3a25413 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -84,8 +84,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
0 : blk_rq_sectors(rq);
__entry->errors = rq->errors;
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
- blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
),
@@ -163,7 +162,7 @@ TRACE_EVENT(block_rq_complete,
__entry->nr_sector = nr_bytes >> 9;
__entry->errors = rq->errors;
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes);
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
blk_dump_cmd(__get_str(cmd), rq);
),
@@ -199,8 +198,7 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
blk_rq_bytes(rq) : 0;
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
- blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -274,8 +272,7 @@ TRACE_EVENT(block_bio_bounce,
bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -313,8 +310,7 @@ TRACE_EVENT(block_bio_complete,
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->error = error;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u [%d]",
@@ -341,8 +337,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -409,8 +404,7 @@ TRACE_EVENT(block_bio_queue,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -438,7 +432,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
__entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
__entry->sector = bio ? bio->bi_iter.bi_sector : 0;
__entry->nr_sector = bio ? bio_sectors(bio) : 0;
- blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
+ blk_fill_rwbs(__entry->rwbs,
bio ? bio->bi_opf : 0, __entry->nr_sector);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -573,8 +567,7 @@ TRACE_EVENT(block_split,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -617,8 +610,7 @@ TRACE_EVENT(block_bio_remap,
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -664,8 +656,7 @@ TRACE_EVENT(block_rq_remap,
__entry->old_dev = dev;
__entry->old_sector = from;
__entry->nr_bios = blk_rq_count_bios(rq);
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
- blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index dbafc5df03f3..95cecbf67f5c 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1777,14 +1777,14 @@ void blk_dump_cmd(char *buf, struct request *rq)
}
}
-void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
+void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
int i = 0;
- if (rw & REQ_PREFLUSH)
+ if (op & REQ_PREFLUSH)
rwbs[i++] = 'F';
- switch (op) {
+ switch (op & REQ_OP_MASK) {
case REQ_OP_WRITE:
case REQ_OP_WRITE_SAME:
rwbs[i++] = 'W';
@@ -1806,13 +1806,13 @@ void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
rwbs[i++] = 'N';
}
- if (rw & REQ_FUA)
+ if (op & REQ_FUA)
rwbs[i++] = 'F';
- if (rw & REQ_RAHEAD)
+ if (op & REQ_RAHEAD)
rwbs[i++] = 'A';
- if (rw & REQ_SYNC)
+ if (op & REQ_SYNC)
rwbs[i++] = 'S';
- if (rw & REQ_META)
+ if (op & REQ_META)
rwbs[i++] = 'M';
rwbs[i] = '\0';
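With everything in one word, blk_fill_rwbs() above derives the whole
rwbs string from a single argument. A worked sketch, assuming the
post-patch signature; an 8-byte buffer comfortably holds the longest
string the function emits plus the terminating NUL:

        char rwbs[8];

        blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA, 4096);
        /* rwbs now reads "WFS": Write, then Fua, then Sync, per the
         * flag order in the function above.
         */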