Diffstat (limited to 'block/blk-merge.c')
 block/blk-merge.c | 40 ----------------------------------------
 1 file changed, 0 insertions(+), 40 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ea6968313b4a..49d0d0da0bf2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -153,22 +153,6 @@ static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
 	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
 }
 
-static struct bio *blk_bio_write_same_split(struct request_queue *q,
-					    struct bio *bio,
-					    struct bio_set *bs,
-					    unsigned *nsegs)
-{
-	*nsegs = 1;
-
-	if (!q->limits.max_write_same_sectors)
-		return NULL;
-
-	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
-		return NULL;
-
-	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
-}
-
 /*
  * Return the maximum number of sectors from the start of a bio that may be
  * submitted as a single request to a block device. If enough sectors remain,
@@ -352,10 +336,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
 		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
 				nr_segs);
 		break;
-	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
-				nr_segs);
-		break;
 	default:
 		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
 		break;
@@ -415,8 +395,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 		return 1;
 	case REQ_OP_WRITE_ZEROES:
 		return 0;
-	case REQ_OP_WRITE_SAME:
-		return 1;
 	}
 
 	rq_for_each_bvec(bv, rq, iter)
@@ -554,8 +532,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
-	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
-		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
 	else if (rq->bio)
 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 
@@ -762,13 +738,6 @@ static enum elv_merge blk_try_req_merge(struct request *req,
 	return ELEVATOR_NO_MERGE;
 }
 
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
-	if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
-		return true;
-	return false;
-}
-
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
@@ -785,10 +754,6 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (rq_data_dir(req) != rq_data_dir(next))
 		return NULL;
 
-	if (req_op(req) == REQ_OP_WRITE_SAME &&
-	    !blk_write_same_mergeable(req->bio, next->bio))
-		return NULL;
-
 	/*
 	 * Don't allow merge of different write hints, or for a hint with
 	 * non-hint IO.
@@ -921,11 +886,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
 		return false;
 
-	/* must be using the same buffer */
-	if (req_op(rq) == REQ_OP_WRITE_SAME &&
-	    !blk_write_same_mergeable(rq->bio, bio))
-		return false;
-
 	/*
 	 * Don't allow merge of different write hints, or for a hint with
 	 * non-hint IO.