path: root/drivers/md/bcache/writeback.c
author		Michael Lyle <mlyle@lyle.org>	2017-10-13 16:35:39 -0700
committer	Jens Axboe <axboe@kernel.dk>	2017-10-16 09:07:26 -0600
commit		a8500fc816b19795756d27c762daa5e19f5e1b6f (patch)
tree		38d73cfab43258b7d24a1f9ea7502dc87e93c425 /drivers/md/bcache/writeback.c
parent		e41166c5c44e30dbd620f7c77a27efe5d5cc551a (diff)
bcache: rearrange writeback main thread ratelimit
The time spent searching for things to write back "counts" for the
actual rate achieved, so don't flush the accumulated rate with each
chunk.

This will maintain better fidelity to user-commanded rates, but it
may slightly increase the burstiness of writeback.  The writeback
lock needs improvement to help mitigate this.

Signed-off-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
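To make the rate accounting concrete, below is a minimal, illustrative sketch of a time-based rate limiter in which "reset" discards all accumulated history. It is not the kernel's bch_ratelimit implementation; the struct layout, field names, and sketch_* helpers are assumptions for illustration only. In this model, resetting the limiter right before each read_dirty() pass (as the old code did) forgets the time already spent searching the btree, so the achieved writeback rate can run ahead of the user-commanded rate; resetting only at thread start and after the idle sleep (as the patch does) lets search time count against the rate, at the cost of somewhat burstier I/O.

/*
 * Illustrative sketch only -- not the bcache bch_ratelimit code.
 * "reset" forgets all accumulated history, so any work (and time)
 * spent before the reset no longer counts toward the configured rate.
 */
#include <stdint.h>
#include <time.h>

struct sketch_ratelimit {		/* hypothetical type */
	uint64_t next_ns;		/* earliest time more work is allowed */
	uint64_t units_per_sec;		/* user-commanded rate */
};

static uint64_t sketch_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Analogous in spirit to bch_ratelimit_reset(): drop accumulated deficit. */
static void sketch_ratelimit_reset(struct sketch_ratelimit *r)
{
	r->next_ns = sketch_now_ns();
}

/* Account for 'done' units of work; return ns to sleep to stay on rate. */
static uint64_t sketch_ratelimit_delay(struct sketch_ratelimit *r, uint64_t done)
{
	uint64_t now = sketch_now_ns();

	r->next_ns += done * 1000000000ULL / r->units_per_sec;
	return r->next_ns > now ? r->next_ns - now : 0;
}

Under this model, each reset hands back whatever delay the limiter was still "owed", which is why moving the reset out of the per-chunk path improves fidelity to the commanded rate.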
Diffstat (limited to 'drivers/md/bcache/writeback.c')
-rw-r--r--	drivers/md/bcache/writeback.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 897d28050656..9b770b13bdf6 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -440,6 +440,8 @@ static int bch_writeback_thread(void *arg)
 	struct cached_dev *dc = arg;
 	bool searched_full_index;
 
+	bch_ratelimit_reset(&dc->writeback_rate);
+
 	while (!kthread_should_stop()) {
 		down_write(&dc->writeback_lock);
 		if (!atomic_read(&dc->has_dirty) ||
@@ -467,7 +469,6 @@ static int bch_writeback_thread(void *arg)
 
 		up_write(&dc->writeback_lock);
 
-		bch_ratelimit_reset(&dc->writeback_rate);
 		read_dirty(dc);
 
 		if (searched_full_index) {
@@ -477,6 +478,8 @@ static int bch_writeback_thread(void *arg)
 			while (delay &&
 			       !kthread_should_stop() &&
 			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
 				delay = schedule_timeout_interruptible(delay);
+
+			bch_ratelimit_reset(&dc->writeback_rate);
 		}
 	}