author     Joe Thornber <ejt@redhat.com>      2017-05-11 08:22:31 -0400
committer  Mike Snitzer <snitzer@redhat.com>  2017-05-14 21:54:33 -0400
commit     701e03e4e180f0cd97d4139a32e2b2d879d12da2 (patch)
tree       891165d167eedd13ab433a86377d9f46b2d44e97 /drivers
parent     6cf4cc8f8b3b7bc9e3c04a7eab44b985d50029fc (diff)
dm cache: track all IO to the cache rather than just the origin device's IO
IO tracking is used to throttle writebacks while the origin device is busy.
But even when all IO is going to the fast (cache) device, writebacks can
significantly degrade performance.  So track all IO, not just IO to the
origin, to gauge whether the cache is busy.  Otherwise, synthetic IO tests
(e.g. fio) that send all IO to the fast device would never have their
writebacks throttled.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
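For context, below is a minimal sketch of what the io_tracker behind the
iot_*() helpers touched by this patch plausibly looks like.  Only iot_init(),
iot_io_begin(), iot_io_end(), iot_idle_for() and the in_flight field are
confirmed by the diff itself; the spinlock and the jiffies-based idle clock
are assumptions made for illustration.

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch only: reconstructed from the calls visible in this patch. */
struct io_tracker {
	spinlock_t lock;
	sector_t in_flight;		/* sectors of IO currently in flight */
	unsigned long idle_time;	/* jiffies when tracker last went idle */
};

static void iot_init(struct io_tracker *iot)
{
	spin_lock_init(&iot->lock);
	iot->in_flight = 0;
	iot->idle_time = jiffies;
}

static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	iot->in_flight += len;
	spin_unlock_irqrestore(&iot->lock, flags);
}

static void iot_io_end(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	iot->in_flight -= len;
	if (!iot->in_flight)
		iot->idle_time = jiffies;	/* record the moment we went idle */
	spin_unlock_irqrestore(&iot->lock, flags);
}

/*
 * Returns true if the tracker has had no in-flight IO for at least
 * 'jifs' jiffies (HZ, i.e. one second, in the caller below).
 */
static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	r = !iot->in_flight && time_after(jiffies, iot->idle_time + jifs);
	spin_unlock_irqrestore(&iot->lock, flags);

	return r;
}

With this shape, renaming origin_tracker to tracker and dropping the
bi_bdev check in accountable_bio() is all the patch needs: every
non-discard bio, whichever device it targets, now feeds the same
in_flight count that the writeback throttle consults.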
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-cache-target.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0760ba409c21..232078e48167 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -477,7 +477,7 @@ struct cache {
spinlock_t invalidation_lock;
struct list_head invalidation_requests;
- struct io_tracker origin_tracker;
+ struct io_tracker tracker;
struct work_struct commit_ws;
struct batcher committer;
@@ -904,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
static bool accountable_bio(struct cache *cache, struct bio *bio)
{
- return ((bio->bi_bdev == cache->origin_dev->bdev) &&
- bio_op(bio) != REQ_OP_DISCARD);
+ return bio_op(bio) != REQ_OP_DISCARD;
}
static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -915,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
if (accountable_bio(cache, bio)) {
pb->len = bio_sectors(bio);
- iot_io_begin(&cache->origin_tracker, pb->len);
+ iot_io_begin(&cache->tracker, pb->len);
}
}
@@ -924,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
- iot_io_end(&cache->origin_tracker, pb->len);
+ iot_io_end(&cache->tracker, pb->len);
}
static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1725,7 +1724,7 @@ enum busy {
static enum busy spare_migration_bandwidth(struct cache *cache)
{
- bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+ bool idle = iot_idle_for(&cache->tracker, HZ);
sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
cache->sectors_per_block;
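(For reference: the hunk above only shows the start of
spare_migration_bandwidth().  A sketch of how the function plausibly
concludes, using the idle flag computed from the unified tracker; the
comparison against cache->migration_threshold is an assumption, not part
of this patch:

	if (idle && current_volume <= cache->migration_threshold)
		return IDLE;
	else
		return BUSY;

i.e. background migrations are only allowed when the whole cache, not
just the origin device, has been quiet for a full second.)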
@@ -2720,7 +2719,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
batcher_init(&cache->committer, commit_op, cache,
issue_op, cache, cache->wq);
- iot_init(&cache->origin_tracker);
+ iot_init(&cache->tracker);
init_rwsem(&cache->background_work_lock);
prevent_background_work(cache);
@@ -2944,7 +2943,7 @@ static void cache_postsuspend(struct dm_target *ti)
cancel_delayed_work(&cache->waker);
flush_workqueue(cache->wq);
- WARN_ON(cache->origin_tracker.in_flight);
+ WARN_ON(cache->tracker.in_flight);
/*
* If it's a flush suspend there won't be any deferred bios, so this