path: root/block/bfq-iosched.c
author	Paolo Valente <paolo.valente@linaro.org>	2019-01-29 12:06:37 +0100
committer	Jens Axboe <axboe@kernel.dk>	2019-01-31 12:50:24 -0700
commit	02a6d787f4afc8be2d3d52ab0a1df0c6a2d99e7b (patch)
tree	8fc56a5fc067c413ff910421c8f2d9dbfc915256 /block/bfq-iosched.c
parent	b3c3498112ffafa5f613bb482f9723996bfd5e4f (diff)
block, bfq: do not overcharge writes in asymmetric scenarios
Writes tend to starve reads. bfq counters this problem by overcharging writes with an inflated service w.r.t. the actual service (number of sectors written) they receive.

Yet this overcharging is useless, and actually causes unfairness in the opposite direction, when bfq happens to be enforcing strong I/O control. bfq does this enforcing when the scenario is asymmetric, i.e., when some bfq_queue or group of bfq_queues is to be granted a different bandwidth than some other bfq_queue or group of bfq_queues. So, in such a scenario, this commit disables write overcharging.

Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
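To make the charging rule concrete, below is a minimal user-space sketch (not kernel code) of the logic this patch changes. It assumes an async charge factor of 3, the value of bfq_async_charge_factor around this kernel series; is_sync, wr_coeff and symmetric are hypothetical stand-ins for bfq_bfqq_sync(), bfqq->wr_coeff and bfq_symmetric_scenario().

/*
 * Illustrative sketch only: models how a write's charged service differs
 * between symmetric and asymmetric scenarios after this patch.
 */
#include <stdio.h>
#include <stdbool.h>

#define ASYNC_CHARGE_FACTOR 3	/* assumed value of bfq_async_charge_factor */

/* Service charged for a request of 'sectors' sectors. */
static unsigned long serv_to_charge(unsigned long sectors, bool is_sync,
				    unsigned int wr_coeff, bool symmetric)
{
	/*
	 * Reads, weight-raised queues, and any queue in an asymmetric
	 * scenario are charged their actual size; only writes in a
	 * symmetric scenario are still overcharged.
	 */
	if (is_sync || wr_coeff > 1 || !symmetric)
		return sectors;
	return sectors * ASYNC_CHARGE_FACTOR;
}

int main(void)
{
	unsigned long write = 256;	/* a 128 KiB write, in 512 B sectors */

	printf("symmetric:  write charged %lu sectors\n",
	       serv_to_charge(write, false, 1, true));	/* 768: overcharged */
	printf("asymmetric: write charged %lu sectors\n",
	       serv_to_charge(write, false, 1, false));	/* 256: actual size */
	return 0;
}

In the asymmetric case, charging only the actual size keeps the service accounting accurate, so the strong I/O control that bfq enforces in that scenario is not skewed against writes.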
Diffstat (limited to 'block/bfq-iosched.c')
 block/bfq-iosched.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 2ab53d93ba12..06268449d2ca 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -888,7 +888,8 @@ static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
 static unsigned long bfq_serv_to_charge(struct request *rq,
 					struct bfq_queue *bfqq)
 {
-	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
+	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
+	    !bfq_symmetric_scenario(bfqq->bfqd))
 		return blk_rq_sectors(rq);
 	return blk_rq_sectors(rq) * bfq_async_charge_factor;
 }