Diffstat (limited to 'drivers/md/dm-cache-target.c')
| -rw-r--r-- | drivers/md/dm-cache-target.c | 3280 |
1 file changed, 2094 insertions, 1186 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 0df3ec085ebb..a10d75a562db 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Red Hat. All rights reserved. * @@ -5,15 +6,19 @@ */ #include "dm.h" -#include "dm-bio-prison.h" +#include "dm-bio-prison-v2.h" #include "dm-bio-record.h" #include "dm-cache-metadata.h" +#include "dm-io-tracker.h" +#include "dm-cache-background-tracker.h" #include <linux/dm-io.h> #include <linux/dm-kcopyd.h> +#include <linux/jiffies.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/module.h> +#include <linux/rwsem.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -37,51 +42,242 @@ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle, /*----------------------------------------------------------------*/ -static size_t bitset_size_in_bytes(unsigned nr_entries) +/* + * Represents a chunk of future work. 'input' allows continuations to pass + * values between themselves, typically error values. + */ +struct continuation { + struct work_struct ws; + blk_status_t input; +}; + +static inline void init_continuation(struct continuation *k, + void (*fn)(struct work_struct *)) +{ + INIT_WORK(&k->ws, fn); + k->input = 0; +} + +static inline void queue_continuation(struct workqueue_struct *wq, + struct continuation *k) +{ + queue_work(wq, &k->ws); +} + +/*----------------------------------------------------------------*/ + +/* + * The batcher collects together pieces of work that need a particular + * operation to occur before they can proceed (typically a commit). + */ +struct batcher { + /* + * The operation that everyone is waiting for. + */ + blk_status_t (*commit_op)(void *context); + void *commit_context; + + /* + * This is how bios should be issued once the commit op is complete + * (accounted_request). + */ + void (*issue_op)(struct bio *bio, void *context); + void *issue_context; + + /* + * Queued work gets put on here after commit. + */ + struct workqueue_struct *wq; + + spinlock_t lock; + struct list_head work_items; + struct bio_list bios; + struct work_struct commit_work; + + bool commit_scheduled; +}; + +static void __commit(struct work_struct *_ws) +{ + struct batcher *b = container_of(_ws, struct batcher, commit_work); + blk_status_t r; + struct list_head work_items; + struct work_struct *ws, *tmp; + struct continuation *k; + struct bio *bio; + struct bio_list bios; + + INIT_LIST_HEAD(&work_items); + bio_list_init(&bios); + + /* + * We have to grab these before the commit_op to avoid a race + * condition. 
+ */ + spin_lock_irq(&b->lock); + list_splice_init(&b->work_items, &work_items); + bio_list_merge_init(&bios, &b->bios); + b->commit_scheduled = false; + spin_unlock_irq(&b->lock); + + r = b->commit_op(b->commit_context); + + list_for_each_entry_safe(ws, tmp, &work_items, entry) { + k = container_of(ws, struct continuation, ws); + k->input = r; + INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */ + queue_work(b->wq, ws); + } + + while ((bio = bio_list_pop(&bios))) { + if (r) { + bio->bi_status = r; + bio_endio(bio); + } else + b->issue_op(bio, b->issue_context); + } +} + +static void batcher_init(struct batcher *b, + blk_status_t (*commit_op)(void *), + void *commit_context, + void (*issue_op)(struct bio *bio, void *), + void *issue_context, + struct workqueue_struct *wq) +{ + b->commit_op = commit_op; + b->commit_context = commit_context; + b->issue_op = issue_op; + b->issue_context = issue_context; + b->wq = wq; + + spin_lock_init(&b->lock); + INIT_LIST_HEAD(&b->work_items); + bio_list_init(&b->bios); + INIT_WORK(&b->commit_work, __commit); + b->commit_scheduled = false; +} + +static void async_commit(struct batcher *b) +{ + queue_work(b->wq, &b->commit_work); +} + +static void continue_after_commit(struct batcher *b, struct continuation *k) +{ + bool commit_scheduled; + + spin_lock_irq(&b->lock); + commit_scheduled = b->commit_scheduled; + list_add_tail(&k->ws.entry, &b->work_items); + spin_unlock_irq(&b->lock); + + if (commit_scheduled) + async_commit(b); +} + +/* + * Bios are errored if commit failed. + */ +static void issue_after_commit(struct batcher *b, struct bio *bio) { - return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); + bool commit_scheduled; + + spin_lock_irq(&b->lock); + commit_scheduled = b->commit_scheduled; + bio_list_add(&b->bios, bio); + spin_unlock_irq(&b->lock); + + if (commit_scheduled) + async_commit(b); } -static unsigned long *alloc_bitset(unsigned nr_entries) +/* + * Call this if some urgent work is waiting for the commit to complete. + */ +static void schedule_commit(struct batcher *b) { - size_t s = bitset_size_in_bytes(nr_entries); - return vzalloc(s); + bool immediate; + + spin_lock_irq(&b->lock); + immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios); + b->commit_scheduled = true; + spin_unlock_irq(&b->lock); + + if (immediate) + async_commit(b); } -static void clear_bitset(void *bitset, unsigned nr_entries) +/* + * There are a couple of places where we let a bio run, but want to do some + * work before calling its endio function. We do this by temporarily + * changing the endio fn. + */ +struct dm_hook_info { + bio_end_io_t *bi_end_io; +}; + +static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, + bio_end_io_t *bi_end_io, void *bi_private) { - size_t s = bitset_size_in_bytes(nr_entries); - memset(bitset, 0, s); + h->bi_end_io = bio->bi_end_io; + + bio->bi_end_io = bi_end_io; + bio->bi_private = bi_private; } -static void free_bitset(unsigned long *bits) +static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) { - vfree(bits); + bio->bi_end_io = h->bi_end_io; } /*----------------------------------------------------------------*/ -#define PRISON_CELLS 1024 #define MIGRATION_POOL_SIZE 128 #define COMMIT_PERIOD HZ #define MIGRATION_COUNT_WINDOW 10 /* - * The block size of the device holding cache data must be >= 32KB + * The block size of the device holding cache data must be + * between 32KB and 1GB. 
*/ #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT) +#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT) -/* - * FIXME: the cache is read/write for the time being. - */ -enum cache_mode { +enum cache_metadata_mode { CM_WRITE, /* metadata may be changed */ CM_READ_ONLY, /* metadata may not be changed */ + CM_FAIL +}; + +enum cache_io_mode { + /* + * Data is written to cached blocks only. These blocks are marked + * dirty. If you lose the cache device you will lose data. + * Potential performance increase for both reads and writes. + */ + CM_IO_WRITEBACK, + + /* + * Data is written to both cache and origin. Blocks are never + * dirty. Potential performance benfit for reads only. + */ + CM_IO_WRITETHROUGH, + + /* + * A degraded mode useful for various cache coherency situations + * (eg, rolling back snapshots). Reads and writes always go to the + * origin. If a write goes to a cached oblock, then the cache + * block is invalidated. + */ + CM_IO_PASSTHROUGH }; struct cache_features { - enum cache_mode mode; - bool write_through:1; + enum cache_metadata_mode mode; + enum cache_io_mode io_mode; + unsigned int metadata_version; + bool discard_passdown:1; }; struct cache_stats { @@ -91,6 +287,7 @@ struct cache_stats { atomic_t write_miss; atomic_t demotion; atomic_t promotion; + atomic_t writeback; atomic_t copies_avoided; atomic_t cache_cell_clash; atomic_t commit_count; @@ -99,7 +296,15 @@ struct cache_stats { struct cache { struct dm_target *ti; - struct dm_target_callbacks callbacks; + spinlock_t lock; + + /* + * Fields for converting from sectors to blocks. + */ + int sectors_per_block_shift; + sector_t sectors_per_block; + + struct dm_cache_metadata *cmd; /* * Metadata is written to this device. @@ -117,11 +322,6 @@ struct cache { struct dm_dev *cache_dev; /* - * Cache features such as write-through. - */ - struct cache_features features; - - /* * Size of the origin device in _complete_ blocks and native sectors. */ dm_oblock_t origin_blocks; @@ -133,262 +333,277 @@ struct cache { dm_cblock_t cache_size; /* - * Fields for converting from sectors to blocks. + * Invalidation fields. */ - uint32_t sectors_per_block; - int sectors_per_block_shift; - - struct dm_cache_metadata *cmd; + spinlock_t invalidation_lock; + struct list_head invalidation_requests; - spinlock_t lock; - struct bio_list deferred_bios; - struct bio_list deferred_flush_bios; - struct bio_list deferred_writethrough_bios; - struct list_head quiesced_migrations; - struct list_head completed_migrations; - struct list_head need_commit_migrations; sector_t migration_threshold; - atomic_t nr_migrations; wait_queue_head_t migration_wait; + atomic_t nr_allocated_migrations; /* - * cache_size entries, dirty if set + * The number of in flight migrations that are performing + * background io. eg, promotion, writeback. */ - dm_cblock_t nr_dirty; - unsigned long *dirty_bitset; + atomic_t nr_io_migrations; + + struct bio_list deferred_bios; + + struct rw_semaphore quiesce_lock; /* * origin_blocks entries, discarded if set. */ - uint32_t discard_block_size; /* a power of 2 times sectors per block */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; + uint32_t discard_block_size; /* a power of 2 times sectors per block */ + + /* + * Rather than reconstructing the table line for the status we just + * save it and regurgitate. 
+ */ + unsigned int nr_ctr_args; + const char **ctr_args; struct dm_kcopyd_client *copier; + struct work_struct deferred_bio_worker; + struct work_struct migration_worker; struct workqueue_struct *wq; - struct work_struct worker; - struct delayed_work waker; - unsigned long last_commit_jiffies; - - struct dm_bio_prison *prison; - struct dm_deferred_set *all_io_ds; + struct dm_bio_prison_v2 *prison; - mempool_t *migration_pool; - struct dm_cache_migration *next_migration; + /* + * cache_size entries, dirty if set + */ + unsigned long *dirty_bitset; + atomic_t nr_dirty; + unsigned int policy_nr_args; struct dm_cache_policy *policy; - unsigned policy_nr_args; + + /* + * Cache features such as write-through. + */ + struct cache_features features; + + struct cache_stats stats; bool need_tick_bio:1; bool sized:1; - bool quiescing:1; + bool invalidate:1; bool commit_requested:1; bool loaded_mappings:1; bool loaded_discards:1; - struct cache_stats stats; + struct rw_semaphore background_work_lock; + + struct batcher committer; + struct work_struct commit_ws; + + struct dm_io_tracker tracker; + + mempool_t migration_pool; + + struct bio_set bs; /* - * Rather than reconstructing the table line for the status we just - * save it and regurgitate. + * Cache_size entries. Set bits indicate blocks mapped beyond the + * target length, which are marked for invalidation. */ - unsigned nr_ctr_args; - const char **ctr_args; + unsigned long *invalid_bitset; }; struct per_bio_data { bool tick:1; - unsigned req_nr:2; - struct dm_deferred_entry *all_io_entry; - - /* - * writethrough fields. These MUST remain at the end of this - * structure and the 'cache' member must be the first as it - * is used to determine the offset of the writethrough fields. - */ - struct cache *cache; - dm_cblock_t cblock; - bio_end_io_t *saved_bi_end_io; - struct dm_bio_details bio_details; + unsigned int req_nr:2; + struct dm_bio_prison_cell_v2 *cell; + struct dm_hook_info hook_info; + sector_t len; }; struct dm_cache_migration { - struct list_head list; + struct continuation k; struct cache *cache; - unsigned long start_jiffies; - dm_oblock_t old_oblock; - dm_oblock_t new_oblock; - dm_cblock_t cblock; + struct policy_work *op; + struct bio *overwrite_bio; + struct dm_bio_prison_cell_v2 *cell; - bool err:1; - bool writeback:1; - bool demote:1; - bool promote:1; - - struct dm_bio_prison_cell *old_ocell; - struct dm_bio_prison_cell *new_ocell; + dm_cblock_t invalidate_cblock; + dm_oblock_t invalidate_oblock; }; -/* - * Processing a bio in the worker thread may require these memory - * allocations. We prealloc to avoid deadlocks (the same worker thread - * frees them back to the mempool). - */ -struct prealloc { - struct dm_cache_migration *mg; - struct dm_bio_prison_cell *cell1; - struct dm_bio_prison_cell *cell2; -}; +/*----------------------------------------------------------------*/ -static void wake_worker(struct cache *cache) +static bool writethrough_mode(struct cache *cache) { - queue_work(cache->wq, &cache->worker); + return cache->features.io_mode == CM_IO_WRITETHROUGH; } -/*----------------------------------------------------------------*/ +static bool writeback_mode(struct cache *cache) +{ + return cache->features.io_mode == CM_IO_WRITEBACK; +} -static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) +static inline bool passthrough_mode(struct cache *cache) { - /* FIXME: change to use a local slab. 
*/ - return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); + return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH); } -static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) +/*----------------------------------------------------------------*/ + +static void wake_deferred_bio_worker(struct cache *cache) { - dm_bio_prison_free_cell(cache->prison, cell); + queue_work(cache->wq, &cache->deferred_bio_worker); } -static int prealloc_data_structs(struct cache *cache, struct prealloc *p) +static void wake_migration_worker(struct cache *cache) { - if (!p->mg) { - p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); - if (!p->mg) - return -ENOMEM; - } + if (passthrough_mode(cache)) + return; - if (!p->cell1) { - p->cell1 = alloc_prison_cell(cache); - if (!p->cell1) - return -ENOMEM; - } + queue_work(cache->wq, &cache->migration_worker); +} - if (!p->cell2) { - p->cell2 = alloc_prison_cell(cache); - if (!p->cell2) - return -ENOMEM; - } +/*----------------------------------------------------------------*/ - return 0; +static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache) +{ + return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO); } -static void prealloc_free_structs(struct cache *cache, struct prealloc *p) +static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell) { - if (p->cell2) - free_prison_cell(cache, p->cell2); + dm_bio_prison_free_cell_v2(cache->prison, cell); +} + +static struct dm_cache_migration *alloc_migration(struct cache *cache) +{ + struct dm_cache_migration *mg; + + mg = mempool_alloc(&cache->migration_pool, GFP_NOIO); - if (p->cell1) - free_prison_cell(cache, p->cell1); + memset(mg, 0, sizeof(*mg)); - if (p->mg) - mempool_free(p->mg, cache->migration_pool); + mg->cache = cache; + atomic_inc(&cache->nr_allocated_migrations); + + return mg; } -static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) +static void free_migration(struct dm_cache_migration *mg) { - struct dm_cache_migration *mg = p->mg; + struct cache *cache = mg->cache; - BUG_ON(!mg); - p->mg = NULL; + if (atomic_dec_and_test(&cache->nr_allocated_migrations)) + wake_up(&cache->migration_wait); - return mg; + mempool_free(mg, &cache->migration_pool); } -/* - * You must have a cell within the prealloc struct to return. If not this - * function will BUG() rather than returning NULL. - */ -static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p) +/*----------------------------------------------------------------*/ + +static inline dm_oblock_t oblock_succ(dm_oblock_t b) { - struct dm_bio_prison_cell *r = NULL; + return to_oblock(from_oblock(b) + 1ull); +} - if (p->cell1) { - r = p->cell1; - p->cell1 = NULL; +static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key) +{ + key->virtual = 0; + key->dev = 0; + key->block_begin = from_oblock(begin); + key->block_end = from_oblock(end); +} - } else if (p->cell2) { - r = p->cell2; - p->cell2 = NULL; - } else - BUG(); +/* + * We have two lock levels. Level 0, which is used to prevent WRITEs, and + * level 1 which prevents *both* READs and WRITEs. + */ +#define WRITE_LOCK_LEVEL 0 +#define READ_WRITE_LOCK_LEVEL 1 - return r; +static unsigned int lock_level(struct bio *bio) +{ + return bio_data_dir(bio) == WRITE ? + WRITE_LOCK_LEVEL : + READ_WRITE_LOCK_LEVEL; } /* - * You can't have more than two cells in a prealloc struct. BUG() will be - * called if you try and overfill. 
+ *-------------------------------------------------------------- + * Per bio data + *-------------------------------------------------------------- */ -static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell) + +static struct per_bio_data *get_per_bio_data(struct bio *bio) { - if (!p->cell2) - p->cell2 = cell; + struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); - else if (!p->cell1) - p->cell1 = cell; + BUG_ON(!pb); + return pb; +} - else - BUG(); +static struct per_bio_data *init_per_bio_data(struct bio *bio) +{ + struct per_bio_data *pb = get_per_bio_data(bio); + + pb->tick = false; + pb->req_nr = dm_bio_get_target_bio_nr(bio); + pb->cell = NULL; + pb->len = 0; + + return pb; } /*----------------------------------------------------------------*/ -static void build_key(dm_oblock_t oblock, struct dm_cell_key *key) +static void defer_bio(struct cache *cache, struct bio *bio) { - key->virtual = 0; - key->dev = 0; - key->block = from_oblock(oblock); -} + spin_lock_irq(&cache->lock); + bio_list_add(&cache->deferred_bios, bio); + spin_unlock_irq(&cache->lock); -/* - * The caller hands in a preallocated cell, and a free function for it. - * The cell will be freed if there's an error, or if it wasn't used because - * a cell with that key already exists. - */ -typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell); + wake_deferred_bio_worker(cache); +} -static int bio_detain(struct cache *cache, dm_oblock_t oblock, - struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, - cell_free_fn free_fn, void *free_context, - struct dm_bio_prison_cell **cell_result) +static void defer_bios(struct cache *cache, struct bio_list *bios) { - int r; - struct dm_cell_key key; - - build_key(oblock, &key); - r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); - if (r) - free_fn(free_context, cell_prealloc); + spin_lock_irq(&cache->lock); + bio_list_merge_init(&cache->deferred_bios, bios); + spin_unlock_irq(&cache->lock); - return r; + wake_deferred_bio_worker(cache); } -static int get_cell(struct cache *cache, - dm_oblock_t oblock, - struct prealloc *structs, - struct dm_bio_prison_cell **cell_result) +/*----------------------------------------------------------------*/ + +static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio) { - int r; - struct dm_cell_key key; - struct dm_bio_prison_cell *cell_prealloc; + bool r; + struct per_bio_data *pb; + struct dm_cell_key_v2 key; + dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL); + struct dm_bio_prison_cell_v2 *cell_prealloc, *cell; - cell_prealloc = prealloc_get_cell(structs); + cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */ - build_key(oblock, &key); - r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); - if (r) - prealloc_put_cell(structs, cell_prealloc); + build_key(oblock, end, &key); + r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell); + if (!r) { + /* + * Failed to get the lock. 
+ */ + free_prison_cell(cache, cell_prealloc); + return r; + } + + if (cell != cell_prealloc) + free_prison_cell(cache, cell_prealloc); + + pb = get_per_bio_data(bio); + pb->cell = cell; return r; } @@ -400,22 +615,33 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b) return test_bit(from_cblock(b), cache->dirty_bitset); } -static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) +static void set_dirty(struct cache *cache, dm_cblock_t cblock) { if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { - cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1); - policy_set_dirty(cache->policy, oblock); + atomic_inc(&cache->nr_dirty); + policy_set_dirty(cache->policy, cblock); } } -static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) +/* + * These two are called when setting after migrations to force the policy + * and dirty bitset to be in sync. + */ +static void force_set_dirty(struct cache *cache, dm_cblock_t cblock) +{ + if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) + atomic_inc(&cache->nr_dirty); + policy_set_dirty(cache->policy, cblock); +} + +static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock) { if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { - policy_clear_dirty(cache->policy, oblock); - cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); - if (!from_cblock(cache->nr_dirty)) + if (atomic_dec_return(&cache->nr_dirty) == 0) dm_table_event(cache->ti->table); } + + policy_clear_dirty(cache->policy, cblock); } /*----------------------------------------------------------------*/ @@ -425,10 +651,6 @@ static bool block_size_is_power_of_two(struct cache *cache) return cache->sectors_per_block_shift >= 0; } -/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */ -#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 -__always_inline -#endif static dm_block_t block_div(dm_block_t b, uint32_t n) { do_div(b, n); @@ -436,49 +658,48 @@ static dm_block_t block_div(dm_block_t b, uint32_t n) return b; } -static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) +static dm_block_t oblocks_per_dblock(struct cache *cache) { - uint32_t discard_blocks = cache->discard_block_size; - dm_block_t b = from_oblock(oblock); + dm_block_t oblocks = cache->discard_block_size; - if (!block_size_is_power_of_two(cache)) - discard_blocks = discard_blocks / cache->sectors_per_block; + if (block_size_is_power_of_two(cache)) + oblocks >>= cache->sectors_per_block_shift; else - discard_blocks >>= cache->sectors_per_block_shift; + oblocks = block_div(oblocks, cache->sectors_per_block); - b = block_div(b, discard_blocks); + return oblocks; +} - return to_dblock(b); +static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) +{ + return to_dblock(block_div(from_oblock(oblock), + oblocks_per_dblock(cache))); } static void set_discard(struct cache *cache, dm_dblock_t b) { - unsigned long flags; - + BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); atomic_inc(&cache->stats.discard_count); - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); set_bit(from_dblock(b), cache->discard_bitset); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); } static void clear_discard(struct cache *cache, dm_dblock_t b) { - unsigned long flags; - - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); clear_bit(from_dblock(b), cache->discard_bitset); - 
spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); } static bool is_discarded(struct cache *cache, dm_dblock_t b) { int r; - unsigned long flags; - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); r = test_bit(from_dblock(b), cache->discard_bitset); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); return r; } @@ -486,114 +707,60 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b) static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) { int r; - unsigned long flags; - spin_lock_irqsave(&cache->lock, flags); + spin_lock_irq(&cache->lock); r = test_bit(from_dblock(oblock_to_dblock(cache, b)), cache->discard_bitset); - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); return r; } -/*----------------------------------------------------------------*/ - -static void load_stats(struct cache *cache) -{ - struct dm_cache_statistics stats; - - dm_cache_metadata_get_stats(cache->cmd, &stats); - atomic_set(&cache->stats.read_hit, stats.read_hits); - atomic_set(&cache->stats.read_miss, stats.read_misses); - atomic_set(&cache->stats.write_hit, stats.write_hits); - atomic_set(&cache->stats.write_miss, stats.write_misses); -} - -static void save_stats(struct cache *cache) -{ - struct dm_cache_statistics stats; - - stats.read_hits = atomic_read(&cache->stats.read_hit); - stats.read_misses = atomic_read(&cache->stats.read_miss); - stats.write_hits = atomic_read(&cache->stats.write_hit); - stats.write_misses = atomic_read(&cache->stats.write_miss); - - dm_cache_metadata_set_stats(cache->cmd, &stats); -} - -/*---------------------------------------------------------------- - * Per bio data - *--------------------------------------------------------------*/ - /* - * If using writeback, leave out struct per_bio_data's writethrough fields. - */ -#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) -#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) - -static size_t get_per_bio_data_size(struct cache *cache) -{ - return cache->features.write_through ? 
PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; -} - -static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) -{ - struct per_bio_data *pb = dm_per_bio_data(bio, data_size); - BUG_ON(!pb); - return pb; -} - -static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) -{ - struct per_bio_data *pb = get_per_bio_data(bio, data_size); - - pb->tick = false; - pb->req_nr = dm_bio_get_target_bio_nr(bio); - pb->all_io_entry = NULL; - - return pb; -} - -/*---------------------------------------------------------------- + * ------------------------------------------------------------- * Remapping - *--------------------------------------------------------------*/ + *-------------------------------------------------------------- + */ static void remap_to_origin(struct cache *cache, struct bio *bio) { - bio->bi_bdev = cache->origin_dev->bdev; + bio_set_dev(bio, cache->origin_dev->bdev); } static void remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) { - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; + sector_t block = from_cblock(cblock); - bio->bi_bdev = cache->cache_dev->bdev; + bio_set_dev(bio, cache->cache_dev->bdev); if (!block_size_is_power_of_two(cache)) - bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + - sector_div(bi_sector, cache->sectors_per_block); + bio->bi_iter.bi_sector = + (block * cache->sectors_per_block) + + sector_div(bi_sector, cache->sectors_per_block); else - bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | - (bi_sector & (cache->sectors_per_block - 1)); + bio->bi_iter.bi_sector = + (block << cache->sectors_per_block_shift) | + (bi_sector & (cache->sectors_per_block - 1)); } static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) { - unsigned long flags; - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + struct per_bio_data *pb; - spin_lock_irqsave(&cache->lock, flags); - if (cache->need_tick_bio && - !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { + spin_lock_irq(&cache->lock); + if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && + bio_op(bio) != REQ_OP_DISCARD) { + pb = get_per_bio_data(bio); pb->tick = true; cache->need_tick_bio = false; } - spin_unlock_irqrestore(&cache->lock, flags); + spin_unlock_irq(&cache->lock); } static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, - dm_oblock_t oblock) + dm_oblock_t oblock) { + // FIXME: check_if_tick_bio_needed() is called way too much through this interface check_if_tick_bio_needed(cache, bio); remap_to_origin(cache, bio); if (bio_data_dir(bio) == WRITE) @@ -603,16 +770,17 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { + check_if_tick_bio_needed(cache, bio); remap_to_cache(cache, bio, cblock); if (bio_data_dir(bio) == WRITE) { - set_dirty(cache, oblock, cblock); + set_dirty(cache, cblock); clear_discard(cache, oblock_to_dblock(cache, oblock)); } } static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) { - sector_t block_nr = bio->bi_sector; + sector_t block_nr = bio->bi_iter.bi_sector; if (!block_size_is_power_of_two(cache)) (void) sector_div(block_nr, cache->sectors_per_block); @@ -622,825 +790,1074 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) return to_oblock(block_nr); } 
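To make the remapping arithmetic above concrete, here is a minimal userspace sketch (not driver code) of the two paths taken by get_bio_block() and remap_to_cache(): a shift/mask fast path when the cache block size is a power of two, and a divide/remainder path otherwise. The 128-sectors-per-block (64 KiB) geometry and the oblock 7 -> cblock 3 mapping are assumptions chosen purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Which origin block does a sector fall in? (mirrors get_bio_block) */
static uint64_t sector_to_block(uint64_t sector, uint32_t spb, int shift)
{
	return shift >= 0 ? sector >> shift : sector / spb;
}

/* Rebase a sector onto its mapped cache block (mirrors remap_to_cache). */
static uint64_t remap_sector(uint64_t sector, uint64_t cblock,
			     uint32_t spb, int shift)
{
	if (shift >= 0)
		/* power-of-two block size: keep the low bits as the offset */
		return (cblock << shift) | (sector & (spb - 1));
	/* general case: multiply by the block size and add the remainder */
	return cblock * spb + sector % spb;
}

int main(void)
{
	uint64_t sector = 1000;
	uint32_t spb = 128;	/* sectors per block, i.e. a 64 KiB block */
	int shift = 7;		/* log2(128); would be -1 for a non-power-of-two size */

	printf("origin block: %llu\n",
	       (unsigned long long)sector_to_block(sector, spb, shift));
	printf("cache sector: %llu\n",
	       (unsigned long long)remap_sector(sector, 3, spb, shift));
	return 0;
}

With these numbers, origin sector 1000 falls in origin block 7 at offset 104, so after remapping it becomes cache sector 3 * 128 + 104 = 488; both paths agree, the shift/mask one simply avoids a 64-bit division in the hot path.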
-static int bio_triggers_commit(struct cache *cache, struct bio *bio) +static bool accountable_bio(struct cache *cache, struct bio *bio) { - return bio->bi_rw & (REQ_FLUSH | REQ_FUA); + return bio_op(bio) != REQ_OP_DISCARD; } -static void issue(struct cache *cache, struct bio *bio) +static void accounted_begin(struct cache *cache, struct bio *bio) { - unsigned long flags; + struct per_bio_data *pb; - if (!bio_triggers_commit(cache, bio)) { - generic_make_request(bio); - return; + if (accountable_bio(cache, bio)) { + pb = get_per_bio_data(bio); + pb->len = bio_sectors(bio); + dm_iot_io_begin(&cache->tracker, pb->len); } - - /* - * Batch together any bios that trigger commits and then issue a - * single commit for them in do_worker(). - */ - spin_lock_irqsave(&cache->lock, flags); - cache->commit_requested = true; - bio_list_add(&cache->deferred_flush_bios, bio); - spin_unlock_irqrestore(&cache->lock, flags); } -static void defer_writethrough_bio(struct cache *cache, struct bio *bio) +static void accounted_complete(struct cache *cache, struct bio *bio) { - unsigned long flags; + struct per_bio_data *pb = get_per_bio_data(bio); - spin_lock_irqsave(&cache->lock, flags); - bio_list_add(&cache->deferred_writethrough_bios, bio); - spin_unlock_irqrestore(&cache->lock, flags); - - wake_worker(cache); + dm_iot_io_end(&cache->tracker, pb->len); } -static void writethrough_endio(struct bio *bio, int err) +static void accounted_request(struct cache *cache, struct bio *bio) { - struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); - bio->bi_end_io = pb->saved_bi_end_io; - - if (err) { - bio_endio(bio, err); - return; - } + accounted_begin(cache, bio); + dm_submit_bio_remap(bio, NULL); +} - dm_bio_restore(&pb->bio_details, bio); - remap_to_cache(pb->cache, bio, pb->cblock); +static void issue_op(struct bio *bio, void *context) +{ + struct cache *cache = context; - /* - * We can't issue this bio directly, since we're in interrupt - * context. So it gets put on a bio list for processing by the - * worker thread. - */ - defer_writethrough_bio(pb->cache, bio); + accounted_request(cache, bio); } /* * When running in writethrough mode we need to send writes to clean blocks - * to both the cache and origin devices. In future we'd like to clone the - * bio and send them in parallel, but for now we're doing them in - * series as this is easier. + * to both the cache and origin devices. Clone the bio and send them in parallel. */ -static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, - dm_oblock_t oblock, dm_cblock_t cblock) +static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio, + dm_oblock_t oblock, dm_cblock_t cblock) { - struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); + struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio, + GFP_NOIO, &cache->bs); - pb->cache = cache; - pb->cblock = cblock; - pb->saved_bi_end_io = bio->bi_end_io; - dm_bio_record(&pb->bio_details, bio); - bio->bi_end_io = writethrough_endio; + BUG_ON(!origin_bio); - remap_to_origin_clear_discard(pb->cache, bio, oblock); -} + bio_chain(origin_bio, bio); -/*---------------------------------------------------------------- - * Migration processing - * - * Migration covers moving data from the origin device to the cache, or - * vice versa. 
- *--------------------------------------------------------------*/ -static void free_migration(struct dm_cache_migration *mg) -{ - mempool_free(mg, mg->cache->migration_pool); -} + if (bio_data_dir(origin_bio) == WRITE) + clear_discard(cache, oblock_to_dblock(cache, oblock)); + submit_bio(origin_bio); -static void inc_nr_migrations(struct cache *cache) -{ - atomic_inc(&cache->nr_migrations); + remap_to_cache(cache, bio, cblock); } -static void dec_nr_migrations(struct cache *cache) +/* + *-------------------------------------------------------------- + * Failure modes + *-------------------------------------------------------------- + */ +static enum cache_metadata_mode get_cache_mode(struct cache *cache) { - atomic_dec(&cache->nr_migrations); - - /* - * Wake the worker in case we're suspending the target. - */ - wake_up(&cache->migration_wait); + return cache->features.mode; } -static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, - bool holder) +static const char *cache_device_name(struct cache *cache) { - (holder ? dm_cell_release : dm_cell_release_no_holder) - (cache->prison, cell, &cache->deferred_bios); - free_prison_cell(cache, cell); + return dm_table_device_name(cache->ti->table); } -static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, - bool holder) +static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) { - unsigned long flags; - - spin_lock_irqsave(&cache->lock, flags); - __cell_defer(cache, cell, holder); - spin_unlock_irqrestore(&cache->lock, flags); + static const char *descs[] = { + "write", + "read-only", + "fail" + }; - wake_worker(cache); + dm_table_event(cache->ti->table); + DMINFO("%s: switching cache to %s mode", + cache_device_name(cache), descs[(int)mode]); } -static void cleanup_migration(struct dm_cache_migration *mg) +static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode) { - dec_nr_migrations(mg->cache); - free_migration(mg); -} + bool needs_check; + enum cache_metadata_mode old_mode = get_cache_mode(cache); -static void migration_failure(struct dm_cache_migration *mg) -{ - struct cache *cache = mg->cache; + if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) { + DMERR("%s: unable to read needs_check flag, setting failure mode.", + cache_device_name(cache)); + new_mode = CM_FAIL; + } - if (mg->writeback) { - DMWARN_LIMIT("writeback failed; couldn't copy block"); - set_dirty(cache, mg->old_oblock, mg->cblock); - cell_defer(cache, mg->old_ocell, false); + if (new_mode == CM_WRITE && needs_check) { + DMERR("%s: unable to switch cache to write mode until repaired.", + cache_device_name(cache)); + if (old_mode != new_mode) + new_mode = old_mode; + else + new_mode = CM_READ_ONLY; + } - } else if (mg->demote) { - DMWARN_LIMIT("demotion failed; couldn't copy block"); - policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); + /* Never move out of fail mode */ + if (old_mode == CM_FAIL) + new_mode = CM_FAIL; - cell_defer(cache, mg->old_ocell, mg->promote ? 
0 : 1); - if (mg->promote) - cell_defer(cache, mg->new_ocell, 1); - } else { - DMWARN_LIMIT("promotion failed; couldn't copy block"); - policy_remove_mapping(cache->policy, mg->new_oblock); - cell_defer(cache, mg->new_ocell, 1); + switch (new_mode) { + case CM_FAIL: + case CM_READ_ONLY: + dm_cache_metadata_set_read_only(cache->cmd); + break; + + case CM_WRITE: + dm_cache_metadata_set_read_write(cache->cmd); + break; } - cleanup_migration(mg); + cache->features.mode = new_mode; + + if (new_mode != old_mode) + notify_mode_switch(cache, new_mode); } -static void migration_success_pre_commit(struct dm_cache_migration *mg) +static void abort_transaction(struct cache *cache) { - unsigned long flags; - struct cache *cache = mg->cache; + const char *dev_name = cache_device_name(cache); - if (mg->writeback) { - cell_defer(cache, mg->old_ocell, false); - clear_dirty(cache, mg->old_oblock, mg->cblock); - cleanup_migration(mg); + if (get_cache_mode(cache) >= CM_READ_ONLY) return; - } else if (mg->demote) { - if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) { - DMWARN_LIMIT("demotion failed; couldn't update on disk metadata"); - policy_force_mapping(cache->policy, mg->new_oblock, - mg->old_oblock); - if (mg->promote) - cell_defer(cache, mg->new_ocell, true); - cleanup_migration(mg); - return; - } - } else { - if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { - DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); - policy_remove_mapping(cache->policy, mg->new_oblock); - cleanup_migration(mg); - return; - } + DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); + if (dm_cache_metadata_abort(cache->cmd)) { + DMERR("%s: failed to abort metadata transaction", dev_name); + set_cache_mode(cache, CM_FAIL); } - spin_lock_irqsave(&cache->lock, flags); - list_add_tail(&mg->list, &cache->need_commit_migrations); - cache->commit_requested = true; - spin_unlock_irqrestore(&cache->lock, flags); + if (dm_cache_metadata_set_needs_check(cache->cmd)) { + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); + set_cache_mode(cache, CM_FAIL); + } } -static void migration_success_post_commit(struct dm_cache_migration *mg) +static void metadata_operation_failed(struct cache *cache, const char *op, int r) { - unsigned long flags; - struct cache *cache = mg->cache; - - if (mg->writeback) { - DMWARN("writeback unexpectedly triggered commit"); - return; - - } else if (mg->demote) { - cell_defer(cache, mg->old_ocell, mg->promote ? 
0 : 1); - - if (mg->promote) { - mg->demote = false; + DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", + cache_device_name(cache), op, r); + abort_transaction(cache); + set_cache_mode(cache, CM_READ_ONLY); +} - spin_lock_irqsave(&cache->lock, flags); - list_add_tail(&mg->list, &cache->quiesced_migrations); - spin_unlock_irqrestore(&cache->lock, flags); +/*----------------------------------------------------------------*/ - } else - cleanup_migration(mg); +static void load_stats(struct cache *cache) +{ + struct dm_cache_statistics stats; - } else { - cell_defer(cache, mg->new_ocell, true); - clear_dirty(cache, mg->new_oblock, mg->cblock); - cleanup_migration(mg); - } + dm_cache_metadata_get_stats(cache->cmd, &stats); + atomic_set(&cache->stats.read_hit, stats.read_hits); + atomic_set(&cache->stats.read_miss, stats.read_misses); + atomic_set(&cache->stats.write_hit, stats.write_hits); + atomic_set(&cache->stats.write_miss, stats.write_misses); } -static void copy_complete(int read_err, unsigned long write_err, void *context) +static void save_stats(struct cache *cache) { - unsigned long flags; - struct dm_cache_migration *mg = (struct dm_cache_migration *) context; - struct cache *cache = mg->cache; + struct dm_cache_statistics stats; - if (read_err || write_err) - mg->err = true; + if (get_cache_mode(cache) >= CM_READ_ONLY) + return; - spin_lock_irqsave(&cache->lock, flags); - list_add_tail(&mg->list, &cache->completed_migrations); - spin_unlock_irqrestore(&cache->lock, flags); + stats.read_hits = atomic_read(&cache->stats.read_hit); + stats.read_misses = atomic_read(&cache->stats.read_miss); + stats.write_hits = atomic_read(&cache->stats.write_hit); + stats.write_misses = atomic_read(&cache->stats.write_miss); - wake_worker(cache); + dm_cache_metadata_set_stats(cache->cmd, &stats); } -static void issue_copy_real(struct dm_cache_migration *mg) +static void update_stats(struct cache_stats *stats, enum policy_operation op) { - int r; - struct dm_io_region o_region, c_region; - struct cache *cache = mg->cache; - - o_region.bdev = cache->origin_dev->bdev; - o_region.count = cache->sectors_per_block; + switch (op) { + case POLICY_PROMOTE: + atomic_inc(&stats->promotion); + break; - c_region.bdev = cache->cache_dev->bdev; - c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; - c_region.count = cache->sectors_per_block; + case POLICY_DEMOTE: + atomic_inc(&stats->demotion); + break; - if (mg->writeback || mg->demote) { - /* demote */ - o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; - r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg); - } else { - /* promote */ - o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; - r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg); + case POLICY_WRITEBACK: + atomic_inc(&stats->writeback); + break; } - - if (r < 0) - migration_failure(mg); } -static void avoid_copy(struct dm_cache_migration *mg) +/* + *--------------------------------------------------------------------- + * Migration processing + * + * Migration covers moving data from the origin device to the cache, or + * vice versa. 
+ *--------------------------------------------------------------------- + */ +static void inc_io_migrations(struct cache *cache) { - atomic_inc(&mg->cache->stats.copies_avoided); - migration_success_pre_commit(mg); + atomic_inc(&cache->nr_io_migrations); } -static void issue_copy(struct dm_cache_migration *mg) +static void dec_io_migrations(struct cache *cache) { - bool avoid; - struct cache *cache = mg->cache; - - if (mg->writeback || mg->demote) - avoid = !is_dirty(cache, mg->cblock) || - is_discarded_oblock(cache, mg->old_oblock); - else - avoid = is_discarded_oblock(cache, mg->new_oblock); - - avoid ? avoid_copy(mg) : issue_copy_real(mg); + atomic_dec(&cache->nr_io_migrations); } -static void complete_migration(struct dm_cache_migration *mg) +static bool discard_or_flush(struct bio *bio) { - if (mg->err) - migration_failure(mg); - else - migration_success_pre_commit(mg); + return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf); } -static void process_migrations(struct cache *cache, struct list_head *head, - void (*fn)(struct dm_cache_migration *)) +static void calc_discard_block_range(struct cache *cache, struct bio *bio, + dm_dblock_t *b, dm_dblock_t *e) { - unsigned long flags; - struct list_head list; - struct dm_cache_migration *mg, *tmp; + sector_t sb = bio->bi_iter.bi_sector; + sector_t se = bio_end_sector(bio); - INIT_LIST_HEAD(&list); - spin_lock_irqsave(&cache->lock, flags); - list_splice_init(head, &list); - spin_unlock_irqrestore(&cache->lock, flags); + *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); - list_for_each_entry_safe(mg, tmp, &list, list) - fn(mg); + if (se - sb < cache->discard_block_size) + *e = *b; + else + *e = to_dblock(block_div(se, cache->discard_block_size)); } -static void __queue_quiesced_migration(struct dm_cache_migration *mg) +/*----------------------------------------------------------------*/ + +static void prevent_background_work(struct cache *cache) { - list_add_tail(&mg->list, &mg->cache->quiesced_migrations); + lockdep_off(); + down_write(&cache->background_work_lock); + lockdep_on(); } -static void queue_quiesced_migration(struct dm_cache_migration *mg) +static void allow_background_work(struct cache *cache) { - unsigned long flags; - struct cache *cache = mg->cache; - - spin_lock_irqsave(&cache->lock, flags); - __queue_quiesced_migration(mg); - spin_unlock_irqrestore(&cache->lock, flags); - - wake_worker(cache); + lockdep_off(); + up_write(&cache->background_work_lock); + lockdep_on(); } -static void queue_quiesced_migrations(struct cache *cache, struct list_head *work) +static bool background_work_begin(struct cache *cache) { - unsigned long flags; - struct dm_cache_migration *mg, *tmp; + bool r; - spin_lock_irqsave(&cache->lock, flags); - list_for_each_entry_safe(mg, tmp, work, list) - __queue_quiesced_migration(mg); - spin_unlock_irqrestore(&cache->lock, flags); + lockdep_off(); + r = down_read_trylock(&cache->background_work_lock); + lockdep_on(); - wake_worker(cache); + return r; } -static void check_for_quiesced_migrations(struct cache *cache, - struct per_bio_data *pb) +static void background_work_end(struct cache *cache) { - struct list_head work; - - if (!pb->all_io_entry) - return; + lockdep_off(); + up_read(&cache->background_work_lock); + lockdep_on(); +} - INIT_LIST_HEAD(&work); - if (pb->all_io_entry) - dm_deferred_entry_dec(pb->all_io_entry, &work); +/*----------------------------------------------------------------*/ - if (!list_empty(&work)) - queue_quiesced_migrations(cache, &work); +static bool 
bio_writes_complete_block(struct cache *cache, struct bio *bio) +{ + return (bio_data_dir(bio) == WRITE) && + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); } -static void quiesce_migration(struct dm_cache_migration *mg) +static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) { - if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list)) - queue_quiesced_migration(mg); + return writeback_mode(cache) && + (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); } -static void promote(struct cache *cache, struct prealloc *structs, - dm_oblock_t oblock, dm_cblock_t cblock, - struct dm_bio_prison_cell *cell) +static void quiesce(struct dm_cache_migration *mg, + void (*continuation)(struct work_struct *)) { - struct dm_cache_migration *mg = prealloc_get_migration(structs); + init_continuation(&mg->k, continuation); + dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws); +} - mg->err = false; - mg->writeback = false; - mg->demote = false; - mg->promote = true; - mg->cache = cache; - mg->new_oblock = oblock; - mg->cblock = cblock; - mg->old_ocell = NULL; - mg->new_ocell = cell; - mg->start_jiffies = jiffies; +static struct dm_cache_migration *ws_to_mg(struct work_struct *ws) +{ + struct continuation *k = container_of(ws, struct continuation, ws); - inc_nr_migrations(cache); - quiesce_migration(mg); + return container_of(k, struct dm_cache_migration, k); } -static void writeback(struct cache *cache, struct prealloc *structs, - dm_oblock_t oblock, dm_cblock_t cblock, - struct dm_bio_prison_cell *cell) +static void copy_complete(int read_err, unsigned long write_err, void *context) { - struct dm_cache_migration *mg = prealloc_get_migration(structs); + struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k); - mg->err = false; - mg->writeback = true; - mg->demote = false; - mg->promote = false; - mg->cache = cache; - mg->old_oblock = oblock; - mg->cblock = cblock; - mg->old_ocell = cell; - mg->new_ocell = NULL; - mg->start_jiffies = jiffies; + if (read_err || write_err) + mg->k.input = BLK_STS_IOERR; - inc_nr_migrations(cache); - quiesce_migration(mg); + queue_continuation(mg->cache->wq, &mg->k); } -static void demote_then_promote(struct cache *cache, struct prealloc *structs, - dm_oblock_t old_oblock, dm_oblock_t new_oblock, - dm_cblock_t cblock, - struct dm_bio_prison_cell *old_ocell, - struct dm_bio_prison_cell *new_ocell) +static void copy(struct dm_cache_migration *mg, bool promote) { - struct dm_cache_migration *mg = prealloc_get_migration(structs); + struct dm_io_region o_region, c_region; + struct cache *cache = mg->cache; - mg->err = false; - mg->writeback = false; - mg->demote = true; - mg->promote = true; - mg->cache = cache; - mg->old_oblock = old_oblock; - mg->new_oblock = new_oblock; - mg->cblock = cblock; - mg->old_ocell = old_ocell; - mg->new_ocell = new_ocell; - mg->start_jiffies = jiffies; + o_region.bdev = cache->origin_dev->bdev; + o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block; + o_region.count = cache->sectors_per_block; + + c_region.bdev = cache->cache_dev->bdev; + c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block; + c_region.count = cache->sectors_per_block; + + if (promote) + dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k); + else + dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k); +} - inc_nr_migrations(cache); - quiesce_migration(mg); +static void 
bio_drop_shared_lock(struct cache *cache, struct bio *bio) +{ + struct per_bio_data *pb = get_per_bio_data(bio); + + if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell)) + free_prison_cell(cache, pb->cell); + pb->cell = NULL; } -/*---------------------------------------------------------------- - * bio processing - *--------------------------------------------------------------*/ -static void defer_bio(struct cache *cache, struct bio *bio) +static void overwrite_endio(struct bio *bio) { - unsigned long flags; + struct dm_cache_migration *mg = bio->bi_private; + struct cache *cache = mg->cache; + struct per_bio_data *pb = get_per_bio_data(bio); - spin_lock_irqsave(&cache->lock, flags); - bio_list_add(&cache->deferred_bios, bio); - spin_unlock_irqrestore(&cache->lock, flags); + dm_unhook_bio(&pb->hook_info, bio); - wake_worker(cache); + if (bio->bi_status) + mg->k.input = bio->bi_status; + + queue_continuation(cache->wq, &mg->k); } -static void process_flush_bio(struct cache *cache, struct bio *bio) +static void overwrite(struct dm_cache_migration *mg, + void (*continuation)(struct work_struct *)) { - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + struct bio *bio = mg->overwrite_bio; + struct per_bio_data *pb = get_per_bio_data(bio); - BUG_ON(bio->bi_size); - if (!pb->req_nr) - remap_to_origin(cache, bio); + dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); + + /* + * The overwrite bio is part of the copy operation, as such it does + * not set/clear discard or dirty flags. + */ + if (mg->op->op == POLICY_PROMOTE) + remap_to_cache(mg->cache, bio, mg->op->cblock); else - remap_to_cache(cache, bio, 0); + remap_to_origin(mg->cache, bio); - issue(cache, bio); + init_continuation(&mg->k, continuation); + accounted_request(mg->cache, bio); } /* - * People generally discard large parts of a device, eg, the whole device - * when formatting. Splitting these large discards up into cache block - * sized ios and then quiescing (always neccessary for discard) takes too - * long. + * Migration steps: * - * We keep it simple, and allow any size of discard to come in, and just - * mark off blocks on the discard bitset. No passdown occurs! - * - * To implement passdown we need to change the bio_prison such that a cell - * can have a key that spans many blocks. 
+ * 1) exclusive lock preventing WRITEs + * 2) quiesce + * 3) copy or issue overwrite bio + * 4) upgrade to exclusive lock preventing READs and WRITEs + * 5) quiesce + * 6) update metadata and commit + * 7) unlock */ -static void process_discard_bio(struct cache *cache, struct bio *bio) +static void mg_complete(struct dm_cache_migration *mg, bool success) { - dm_block_t start_block = dm_sector_div_up(bio->bi_sector, - cache->discard_block_size); - dm_block_t end_block = bio->bi_sector + bio_sectors(bio); - dm_block_t b; + struct bio_list bios; + struct cache *cache = mg->cache; + struct policy_work *op = mg->op; + dm_cblock_t cblock = op->cblock; + + if (success) + update_stats(&cache->stats, op->op); + + switch (op->op) { + case POLICY_PROMOTE: + clear_discard(cache, oblock_to_dblock(cache, op->oblock)); + policy_complete_background_work(cache->policy, op, success); + + if (mg->overwrite_bio) { + if (success) + force_set_dirty(cache, cblock); + else if (mg->k.input) + mg->overwrite_bio->bi_status = mg->k.input; + else + mg->overwrite_bio->bi_status = BLK_STS_IOERR; + bio_endio(mg->overwrite_bio); + } else { + if (success) + force_clear_dirty(cache, cblock); + dec_io_migrations(cache); + } + break; + + case POLICY_DEMOTE: + /* + * We clear dirty here to update the nr_dirty counter. + */ + if (success) + force_clear_dirty(cache, cblock); + policy_complete_background_work(cache->policy, op, success); + dec_io_migrations(cache); + break; + + case POLICY_WRITEBACK: + if (success) + force_clear_dirty(cache, cblock); + policy_complete_background_work(cache->policy, op, success); + dec_io_migrations(cache); + break; + } - end_block = block_div(end_block, cache->discard_block_size); + bio_list_init(&bios); + if (mg->cell) { + if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) + free_prison_cell(cache, mg->cell); + } - for (b = start_block; b < end_block; b++) - set_discard(cache, to_dblock(b)); + free_migration(mg); + defer_bios(cache, &bios); + wake_migration_worker(cache); - bio_endio(bio, 0); + background_work_end(cache); } -static bool spare_migration_bandwidth(struct cache *cache) +static void mg_success(struct work_struct *ws) { - sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * - cache->sectors_per_block; - return current_volume < cache->migration_threshold; -} + struct dm_cache_migration *mg = ws_to_mg(ws); -static bool is_writethrough_io(struct cache *cache, struct bio *bio, - dm_cblock_t cblock) -{ - return bio_data_dir(bio) == WRITE && - cache->features.write_through && !is_dirty(cache, cblock); + mg_complete(mg, mg->k.input == 0); } -static void inc_hit_counter(struct cache *cache, struct bio *bio) +static void mg_update_metadata(struct work_struct *ws) { - atomic_inc(bio_data_dir(bio) == READ ? 
- &cache->stats.read_hit : &cache->stats.write_hit); + int r; + struct dm_cache_migration *mg = ws_to_mg(ws); + struct cache *cache = mg->cache; + struct policy_work *op = mg->op; + + switch (op->op) { + case POLICY_PROMOTE: + r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock); + if (r) { + DMERR_LIMIT("%s: migration failed; couldn't insert mapping", + cache_device_name(cache)); + metadata_operation_failed(cache, "dm_cache_insert_mapping", r); + + mg_complete(mg, false); + return; + } + mg_complete(mg, true); + break; + + case POLICY_DEMOTE: + r = dm_cache_remove_mapping(cache->cmd, op->cblock); + if (r) { + DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata", + cache_device_name(cache)); + metadata_operation_failed(cache, "dm_cache_remove_mapping", r); + + mg_complete(mg, false); + return; + } + + /* + * It would be nice if we only had to commit when a REQ_FLUSH + * comes through. But there's one scenario that we have to + * look out for: + * + * - vblock x in a cache block + * - domotion occurs + * - cache block gets reallocated and over written + * - crash + * + * When we recover, because there was no commit the cache will + * rollback to having the data for vblock x in the cache block. + * But the cache block has since been overwritten, so it'll end + * up pointing to data that was never in 'x' during the history + * of the device. + * + * To avoid this issue we require a commit as part of the + * demotion operation. + */ + init_continuation(&mg->k, mg_success); + continue_after_commit(&cache->committer, &mg->k); + schedule_commit(&cache->committer); + break; + + case POLICY_WRITEBACK: + mg_complete(mg, true); + break; + } } -static void inc_miss_counter(struct cache *cache, struct bio *bio) +static void mg_update_metadata_after_copy(struct work_struct *ws) { - atomic_inc(bio_data_dir(bio) == READ ? - &cache->stats.read_miss : &cache->stats.write_miss); + struct dm_cache_migration *mg = ws_to_mg(ws); + + /* + * Did the copy succeed? + */ + if (mg->k.input) + mg_complete(mg, false); + else + mg_update_metadata(ws); } -static void process_bio(struct cache *cache, struct prealloc *structs, - struct bio *bio) +static void mg_upgrade_lock(struct work_struct *ws) { int r; - bool release_cell = true; - dm_oblock_t block = get_bio_block(cache, bio); - struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; - struct policy_result lookup_result; - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); - bool discarded_block = is_discarded_oblock(cache, block); - bool can_migrate = discarded_block || spare_migration_bandwidth(cache); + struct dm_cache_migration *mg = ws_to_mg(ws); /* - * Check to see if that block is currently migrating. + * Did the copy succeed? */ - cell_prealloc = prealloc_get_cell(structs); - r = bio_detain(cache, block, bio, cell_prealloc, - (cell_free_fn) prealloc_put_cell, - structs, &new_ocell); - if (r > 0) - return; - - r = policy_map(cache->policy, block, true, can_migrate, discarded_block, - bio, &lookup_result); + if (mg->k.input) + mg_complete(mg, false); - if (r == -EWOULDBLOCK) - /* migration has been denied */ - lookup_result.op = POLICY_MISS; + else { + /* + * Now we want the lock to prevent both reads and writes. 
+ */ + r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell, + READ_WRITE_LOCK_LEVEL); + if (r < 0) + mg_complete(mg, false); - switch (lookup_result.op) { - case POLICY_HIT: - inc_hit_counter(cache, bio); - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); + else if (r) + quiesce(mg, mg_update_metadata); - if (is_writethrough_io(cache, bio, lookup_result.cblock)) - remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); else - remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); + mg_update_metadata(ws); + } +} - issue(cache, bio); - break; +static void mg_full_copy(struct work_struct *ws) +{ + struct dm_cache_migration *mg = ws_to_mg(ws); + struct cache *cache = mg->cache; + struct policy_work *op = mg->op; + bool is_policy_promote = (op->op == POLICY_PROMOTE); - case POLICY_MISS: - inc_miss_counter(cache, bio); - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - remap_to_origin_clear_discard(cache, bio, block); - issue(cache, bio); - break; + if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || + is_discarded_oblock(cache, op->oblock)) { + mg_upgrade_lock(ws); + return; + } - case POLICY_NEW: - atomic_inc(&cache->stats.promotion); - promote(cache, structs, block, lookup_result.cblock, new_ocell); - release_cell = false; - break; + init_continuation(&mg->k, mg_upgrade_lock); + copy(mg, is_policy_promote); +} - case POLICY_REPLACE: - cell_prealloc = prealloc_get_cell(structs); - r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc, - (cell_free_fn) prealloc_put_cell, - structs, &old_ocell); - if (r > 0) { +static void mg_copy(struct work_struct *ws) +{ + struct dm_cache_migration *mg = ws_to_mg(ws); + + if (mg->overwrite_bio) { + /* + * No exclusive lock was held when we last checked if the bio + * was optimisable. So we have to check again in case things + * have changed (eg, the block may no longer be discarded). + */ + if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) { /* - * We have to be careful to avoid lock inversion of - * the cells. So we back off, and wait for the - * old_ocell to become free. + * Fallback to a real full copy after doing some tidying up. */ - policy_force_mapping(cache->policy, block, - lookup_result.old_oblock); - atomic_inc(&cache->stats.cache_cell_clash); - break; - } - atomic_inc(&cache->stats.demotion); - atomic_inc(&cache->stats.promotion); + bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); - demote_then_promote(cache, structs, lookup_result.old_oblock, - block, lookup_result.cblock, - old_ocell, new_ocell); - release_cell = false; - break; + BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */ + mg->overwrite_bio = NULL; + inc_io_migrations(mg->cache); + mg_full_copy(ws); + return; + } - default: - DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__, - (unsigned) lookup_result.op); - bio_io_error(bio); - } + /* + * It's safe to do this here, even though it's new data + * because all IO has been locked out of the block. + * + * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL + * so _not_ using mg_upgrade_lock() as continutation. 
+ */ + overwrite(mg, mg_update_metadata_after_copy); - if (release_cell) - cell_defer(cache, new_ocell, false); + } else + mg_full_copy(ws); } -static int need_commit_due_to_time(struct cache *cache) +static int mg_lock_writes(struct dm_cache_migration *mg) { - return jiffies < cache->last_commit_jiffies || - jiffies > cache->last_commit_jiffies + COMMIT_PERIOD; + int r; + struct dm_cell_key_v2 key; + struct cache *cache = mg->cache; + struct dm_bio_prison_cell_v2 *prealloc; + + prealloc = alloc_prison_cell(cache); + + /* + * Prevent writes to the block, but allow reads to continue. + * Unless we're using an overwrite bio, in which case we lock + * everything. + */ + build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key); + r = dm_cell_lock_v2(cache->prison, &key, + mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL, + prealloc, &mg->cell); + if (r < 0) { + free_prison_cell(cache, prealloc); + mg_complete(mg, false); + return r; + } + + if (mg->cell != prealloc) + free_prison_cell(cache, prealloc); + + if (r == 0) + mg_copy(&mg->k.ws); + else + quiesce(mg, mg_copy); + + return 0; } -static int commit_if_needed(struct cache *cache) +static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio) { - if (dm_cache_changed_this_transaction(cache->cmd) && - (cache->commit_requested || need_commit_due_to_time(cache))) { - atomic_inc(&cache->stats.commit_count); - cache->last_commit_jiffies = jiffies; - cache->commit_requested = false; - return dm_cache_commit(cache->cmd, false); + struct dm_cache_migration *mg; + + if (!background_work_begin(cache)) { + policy_complete_background_work(cache->policy, op, false); + return -EPERM; } - return 0; + mg = alloc_migration(cache); + + mg->op = op; + mg->overwrite_bio = bio; + + if (!bio) + inc_io_migrations(cache); + + return mg_lock_writes(mg); } -static void process_deferred_bios(struct cache *cache) +/* + *-------------------------------------------------------------- + * invalidation processing + *-------------------------------------------------------------- + */ + +static void invalidate_complete(struct dm_cache_migration *mg, bool success) { - unsigned long flags; struct bio_list bios; - struct bio *bio; - struct prealloc structs; + struct cache *cache = mg->cache; - memset(&structs, 0, sizeof(structs)); bio_list_init(&bios); + if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) + free_prison_cell(cache, mg->cell); - spin_lock_irqsave(&cache->lock, flags); - bio_list_merge(&bios, &cache->deferred_bios); - bio_list_init(&cache->deferred_bios); - spin_unlock_irqrestore(&cache->lock, flags); + if (!success && mg->overwrite_bio) + bio_io_error(mg->overwrite_bio); + + free_migration(mg); + defer_bios(cache, &bios); + + background_work_end(cache); +} - while (!bio_list_empty(&bios)) { +static void invalidate_completed(struct work_struct *ws) +{ + struct dm_cache_migration *mg = ws_to_mg(ws); + + invalidate_complete(mg, !mg->k.input); +} + +static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock) +{ + int r; + + r = policy_invalidate_mapping(cache->policy, cblock); + if (!r) { + r = dm_cache_remove_mapping(cache->cmd, cblock); + if (r) { + DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata", + cache_device_name(cache)); + metadata_operation_failed(cache, "dm_cache_remove_mapping", r); + } + + } else if (r == -ENODATA) { /* - * If we've got no free migration structs, and processing - * this bio might require one, we pause until there are some - * prepared mappings to process. 
+ * Harmless, already unmapped. */ - if (prealloc_data_structs(cache, &structs)) { - spin_lock_irqsave(&cache->lock, flags); - bio_list_merge(&cache->deferred_bios, &bios); - spin_unlock_irqrestore(&cache->lock, flags); - break; - } + r = 0; + + } else + DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache)); - bio = bio_list_pop(&bios); + return r; +} - if (bio->bi_rw & REQ_FLUSH) - process_flush_bio(cache, bio); - else if (bio->bi_rw & REQ_DISCARD) - process_discard_bio(cache, bio); - else - process_bio(cache, &structs, bio); +static void invalidate_remove(struct work_struct *ws) +{ + int r; + struct dm_cache_migration *mg = ws_to_mg(ws); + struct cache *cache = mg->cache; + + r = invalidate_cblock(cache, mg->invalidate_cblock); + if (r) { + invalidate_complete(mg, false); + return; } - prealloc_free_structs(cache, &structs); + init_continuation(&mg->k, invalidate_completed); + continue_after_commit(&cache->committer, &mg->k); + remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock); + mg->overwrite_bio = NULL; + schedule_commit(&cache->committer); } -static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) +static int invalidate_lock(struct dm_cache_migration *mg) { - unsigned long flags; - struct bio_list bios; - struct bio *bio; + int r; + struct dm_cell_key_v2 key; + struct cache *cache = mg->cache; + struct dm_bio_prison_cell_v2 *prealloc; - bio_list_init(&bios); + prealloc = alloc_prison_cell(cache); + + build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key); + r = dm_cell_lock_v2(cache->prison, &key, + READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell); + if (r < 0) { + free_prison_cell(cache, prealloc); + invalidate_complete(mg, false); + return r; + } + + if (mg->cell != prealloc) + free_prison_cell(cache, prealloc); + + if (r) + quiesce(mg, invalidate_remove); - spin_lock_irqsave(&cache->lock, flags); - bio_list_merge(&bios, &cache->deferred_flush_bios); - bio_list_init(&cache->deferred_flush_bios); - spin_unlock_irqrestore(&cache->lock, flags); + else { + /* + * We can't call invalidate_remove() directly here because we + * might still be in request context. + */ + init_continuation(&mg->k, invalidate_remove); + queue_work(cache->wq, &mg->k.ws); + } - while ((bio = bio_list_pop(&bios))) - submit_bios ? 
generic_make_request(bio) : bio_io_error(bio); + return 0; } -static void process_deferred_writethrough_bios(struct cache *cache) +static int invalidate_start(struct cache *cache, dm_cblock_t cblock, + dm_oblock_t oblock, struct bio *bio) { - unsigned long flags; - struct bio_list bios; - struct bio *bio; + struct dm_cache_migration *mg; - bio_list_init(&bios); + if (!background_work_begin(cache)) + return -EPERM; - spin_lock_irqsave(&cache->lock, flags); - bio_list_merge(&bios, &cache->deferred_writethrough_bios); - bio_list_init(&cache->deferred_writethrough_bios); - spin_unlock_irqrestore(&cache->lock, flags); + mg = alloc_migration(cache); - while ((bio = bio_list_pop(&bios))) - generic_make_request(bio); + mg->overwrite_bio = bio; + mg->invalidate_cblock = cblock; + mg->invalidate_oblock = oblock; + + return invalidate_lock(mg); } -static void writeback_some_dirty_blocks(struct cache *cache) +/* + *-------------------------------------------------------------- + * bio processing + *-------------------------------------------------------------- + */ + +enum busy { + IDLE, + BUSY +}; + +static enum busy spare_migration_bandwidth(struct cache *cache) { - int r = 0; - dm_oblock_t oblock; + bool idle = dm_iot_idle_for(&cache->tracker, HZ); + sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * + cache->sectors_per_block; + + if (idle && current_volume <= cache->migration_threshold) + return IDLE; + else + return BUSY; +} + +static void inc_hit_counter(struct cache *cache, struct bio *bio) +{ + atomic_inc(bio_data_dir(bio) == READ ? + &cache->stats.read_hit : &cache->stats.write_hit); +} + +static void inc_miss_counter(struct cache *cache, struct bio *bio) +{ + atomic_inc(bio_data_dir(bio) == READ ? + &cache->stats.read_miss : &cache->stats.write_miss); +} + +/*----------------------------------------------------------------*/ + +static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block, + bool *commit_needed) +{ + int r, data_dir; + bool rb, background_queued; dm_cblock_t cblock; - struct prealloc structs; - struct dm_bio_prison_cell *old_ocell; - memset(&structs, 0, sizeof(structs)); + *commit_needed = false; - while (spare_migration_bandwidth(cache)) { - if (prealloc_data_structs(cache, &structs)) - break; + rb = bio_detain_shared(cache, block, bio); + if (!rb) { + /* + * An exclusive lock is held for this block, so we have to + * wait. We set the commit_needed flag so the current + * transaction will be committed asap, allowing this lock + * to be dropped. 
+ */ + *commit_needed = true; + return DM_MAPIO_SUBMITTED; + } - r = policy_writeback_work(cache->policy, &oblock, &cblock); - if (r) - break; + data_dir = bio_data_dir(bio); - r = get_cell(cache, oblock, &structs, &old_ocell); - if (r) { - policy_set_dirty(cache->policy, oblock); - break; + if (optimisable_bio(cache, bio, block)) { + struct policy_work *op = NULL; + + r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op); + if (unlikely(r && r != -ENOENT)) { + DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d", + cache_device_name(cache), r); + bio_io_error(bio); + return DM_MAPIO_SUBMITTED; + } + + if (r == -ENOENT && op) { + bio_drop_shared_lock(cache, bio); + BUG_ON(op->op != POLICY_PROMOTE); + mg_start(cache, op, bio); + return DM_MAPIO_SUBMITTED; + } + } else { + r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued); + if (unlikely(r && r != -ENOENT)) { + DMERR_LIMIT("%s: policy_lookup() failed with r = %d", + cache_device_name(cache), r); + bio_io_error(bio); + return DM_MAPIO_SUBMITTED; } - writeback(cache, &structs, oblock, cblock, old_ocell); + if (background_queued) + wake_migration_worker(cache); } - prealloc_free_structs(cache, &structs); -} + if (r == -ENOENT) { + struct per_bio_data *pb = get_per_bio_data(bio); -/*---------------------------------------------------------------- - * Main worker loop - *--------------------------------------------------------------*/ -static void start_quiescing(struct cache *cache) -{ - unsigned long flags; + /* + * Miss. + */ + inc_miss_counter(cache, bio); + if (pb->req_nr == 0) { + accounted_begin(cache, bio); + remap_to_origin_clear_discard(cache, bio, block); + } else { + /* + * This is a duplicate writethrough io that is no + * longer needed because the block has been demoted. + */ + bio_endio(bio); + return DM_MAPIO_SUBMITTED; + } + } else { + /* + * Hit. + */ + inc_hit_counter(cache, bio); + + /* + * Passthrough always maps to the origin, invalidating any + * cache blocks that are written to. + */ + if (passthrough_mode(cache)) { + if (bio_data_dir(bio) == WRITE) { + bio_drop_shared_lock(cache, bio); + atomic_inc(&cache->stats.demotion); + invalidate_start(cache, cblock, block, bio); + } else + remap_to_origin_clear_discard(cache, bio, block); + } else { + if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) && + !is_dirty(cache, cblock)) { + remap_to_origin_and_cache(cache, bio, block, cblock); + accounted_begin(cache, bio); + } else + remap_to_cache_dirty(cache, bio, block, cblock); + } + } + + /* + * dm core turns FUA requests into a separate payload and FLUSH req. + */ + if (bio->bi_opf & REQ_FUA) { + /* + * issue_after_commit will call accounted_begin a second time. So + * we call accounted_complete() to avoid double accounting. 
+ */ + accounted_complete(cache, bio); + issue_after_commit(&cache->committer, bio); + *commit_needed = true; + return DM_MAPIO_SUBMITTED; + } - spin_lock_irqsave(&cache->lock, flags); - cache->quiescing = 1; - spin_unlock_irqrestore(&cache->lock, flags); + return DM_MAPIO_REMAPPED; } -static void stop_quiescing(struct cache *cache) +static bool process_bio(struct cache *cache, struct bio *bio) { - unsigned long flags; + bool commit_needed; - spin_lock_irqsave(&cache->lock, flags); - cache->quiescing = 0; - spin_unlock_irqrestore(&cache->lock, flags); + if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) + dm_submit_bio_remap(bio, NULL); + + return commit_needed; } -static bool is_quiescing(struct cache *cache) +/* + * A non-zero return indicates read_only or fail_io mode. + */ +static int commit(struct cache *cache, bool clean_shutdown) { int r; - unsigned long flags; - spin_lock_irqsave(&cache->lock, flags); - r = cache->quiescing; - spin_unlock_irqrestore(&cache->lock, flags); + if (get_cache_mode(cache) >= CM_READ_ONLY) + return -EINVAL; + + atomic_inc(&cache->stats.commit_count); + r = dm_cache_commit(cache->cmd, clean_shutdown); + if (r) + metadata_operation_failed(cache, "dm_cache_commit", r); return r; } -static void wait_for_migrations(struct cache *cache) +/* + * Used by the batcher. + */ +static blk_status_t commit_op(void *context) { - wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations)); -} + struct cache *cache = context; -static void stop_worker(struct cache *cache) -{ - cancel_delayed_work(&cache->waker); - flush_workqueue(cache->wq); + if (dm_cache_changed_this_transaction(cache->cmd)) + return errno_to_blk_status(commit(cache, false)); + + return 0; } -static void requeue_deferred_io(struct cache *cache) +/*----------------------------------------------------------------*/ + +static bool process_flush_bio(struct cache *cache, struct bio *bio) { - struct bio *bio; - struct bio_list bios; + struct per_bio_data *pb = get_per_bio_data(bio); - bio_list_init(&bios); - bio_list_merge(&bios, &cache->deferred_bios); - bio_list_init(&cache->deferred_bios); + if (!pb->req_nr) + remap_to_origin(cache, bio); + else + remap_to_cache(cache, bio, 0); - while ((bio = bio_list_pop(&bios))) - bio_endio(bio, DM_ENDIO_REQUEUE); + issue_after_commit(&cache->committer, bio); + return true; } -static int more_work(struct cache *cache) +static bool process_discard_bio(struct cache *cache, struct bio *bio) { - if (is_quiescing(cache)) - return !list_empty(&cache->quiesced_migrations) || - !list_empty(&cache->completed_migrations) || - !list_empty(&cache->need_commit_migrations); - else - return !bio_list_empty(&cache->deferred_bios) || - !bio_list_empty(&cache->deferred_flush_bios) || - !bio_list_empty(&cache->deferred_writethrough_bios) || - !list_empty(&cache->quiesced_migrations) || - !list_empty(&cache->completed_migrations) || - !list_empty(&cache->need_commit_migrations); + dm_dblock_t b, e; + + /* + * FIXME: do we need to lock the region? Or can we just assume the + * user wont be so foolish as to issue discard concurrently with + * other IO? 
+ */ + calc_discard_block_range(cache, bio, &b, &e); + while (b != e) { + set_discard(cache, b); + b = to_dblock(from_dblock(b) + 1); + } + + if (cache->features.discard_passdown) { + remap_to_origin(cache, bio); + dm_submit_bio_remap(bio, NULL); + } else + bio_endio(bio); + + return false; } -static void do_worker(struct work_struct *ws) +static void process_deferred_bios(struct work_struct *ws) { - struct cache *cache = container_of(ws, struct cache, worker); + struct cache *cache = container_of(ws, struct cache, deferred_bio_worker); - do { - if (!is_quiescing(cache)) - process_deferred_bios(cache); + bool commit_needed = false; + struct bio_list bios; + struct bio *bio; - process_migrations(cache, &cache->quiesced_migrations, issue_copy); - process_migrations(cache, &cache->completed_migrations, complete_migration); + bio_list_init(&bios); - writeback_some_dirty_blocks(cache); + spin_lock_irq(&cache->lock); + bio_list_merge_init(&bios, &cache->deferred_bios); + spin_unlock_irq(&cache->lock); - process_deferred_writethrough_bios(cache); + while ((bio = bio_list_pop(&bios))) { + if (bio->bi_opf & REQ_PREFLUSH) + commit_needed = process_flush_bio(cache, bio) || commit_needed; - if (commit_if_needed(cache)) { - process_deferred_flush_bios(cache, false); + else if (bio_op(bio) == REQ_OP_DISCARD) + commit_needed = process_discard_bio(cache, bio) || commit_needed; - /* - * FIXME: rollback metadata or just go into a - * failure mode and error everything - */ - } else { - process_deferred_flush_bios(cache, true); - process_migrations(cache, &cache->need_commit_migrations, - migration_success_post_commit); - } - } while (more_work(cache)); + else + commit_needed = process_bio(cache, bio) || commit_needed; + cond_resched(); + } + + if (commit_needed) + schedule_commit(&cache->committer); +} + +/* + *-------------------------------------------------------------- + * Main worker loop + *-------------------------------------------------------------- + */ +static void requeue_deferred_bios(struct cache *cache) +{ + struct bio *bio; + struct bio_list bios; + + bio_list_init(&bios); + bio_list_merge_init(&bios, &cache->deferred_bios); + + while ((bio = bio_list_pop(&bios))) { + bio->bi_status = BLK_STS_DM_REQUEUE; + bio_endio(bio); + cond_resched(); + } } /* @@ -1450,50 +1867,57 @@ static void do_worker(struct work_struct *ws) static void do_waker(struct work_struct *ws) { struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); - policy_tick(cache->policy); - wake_worker(cache); + + policy_tick(cache->policy, true); + wake_migration_worker(cache); + schedule_commit(&cache->committer); queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); } -/*----------------------------------------------------------------*/ - -static int is_congested(struct dm_dev *dev, int bdi_bits) +static void check_migrations(struct work_struct *ws) { - struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); -} + int r; + struct policy_work *op; + struct cache *cache = container_of(ws, struct cache, migration_worker); + enum busy b; -static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct cache *cache = container_of(cb, struct cache, callbacks); + for (;;) { + b = spare_migration_bandwidth(cache); + + r = policy_get_background_work(cache->policy, b == IDLE, &op); + if (r == -ENODATA) + break; + + if (r) { + DMERR_LIMIT("%s: policy_background_work failed", + cache_device_name(cache)); + break; + } - return 
is_congested(cache->origin_dev, bdi_bits) || - is_congested(cache->cache_dev, bdi_bits); + r = mg_start(cache, op, NULL); + if (r) + break; + + cond_resched(); + } } -/*---------------------------------------------------------------- +/* + *-------------------------------------------------------------- * Target methods - *--------------------------------------------------------------*/ + *-------------------------------------------------------------- + */ /* * This function gets called on the error paths of the constructor, so we * have to cope with a partially initialised struct. */ -static void destroy(struct cache *cache) +static void __destroy(struct cache *cache) { - unsigned i; - - if (cache->next_migration) - mempool_free(cache->next_migration, cache->migration_pool); - - if (cache->migration_pool) - mempool_destroy(cache->migration_pool); - - if (cache->all_io_ds) - dm_deferred_set_destroy(cache->all_io_ds); + mempool_exit(&cache->migration_pool); if (cache->prison) - dm_bio_prison_destroy(cache->prison); + dm_bio_prison_destroy_v2(cache->prison); if (cache->wq) destroy_workqueue(cache->wq); @@ -1504,6 +1928,9 @@ static void destroy(struct cache *cache) if (cache->discard_bitset) free_bitset(cache->discard_bitset); + if (cache->invalid_bitset) + free_bitset(cache->invalid_bitset); + if (cache->copier) dm_kcopyd_client_destroy(cache->copier); @@ -1522,11 +1949,22 @@ static void destroy(struct cache *cache) if (cache->policy) dm_cache_policy_destroy(cache->policy); + bioset_exit(&cache->bs); + + kfree(cache); +} + +static void destroy(struct cache *cache) +{ + unsigned int i; + + cancel_delayed_work_sync(&cache->waker); + for (i = 0; i < cache->nr_ctr_args ; i++) kfree(cache->ctr_args[i]); kfree(cache->ctr_args); - kfree(cache); + __destroy(cache); } static void cache_dtr(struct dm_target *ti) @@ -1538,7 +1976,7 @@ static void cache_dtr(struct dm_target *ti) static sector_t get_dev_size(struct dm_dev *dev) { - return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; + return bdev_nr_sectors(dev->bdev); } /*----------------------------------------------------------------*/ @@ -1581,7 +2019,6 @@ struct cache_args { sector_t cache_sectors; struct dm_dev *origin_dev; - sector_t origin_sectors; uint32_t block_size; @@ -1621,13 +2058,12 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as, { int r; sector_t metadata_dev_size; - char b[BDEVNAME_SIZE]; if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->metadata_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->metadata_dev); if (r) { *error = "Error opening metadata device"; return r; @@ -1635,8 +2071,8 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as, metadata_dev_size = get_dev_size(ca->metadata_dev); if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING) - DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", - bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS); + DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.", + ca->metadata_dev->bdev, THIN_METADATA_MAX_SECTORS); return 0; } @@ -1649,8 +2085,8 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as, if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->cache_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | 
BLK_OPEN_WRITE, &ca->cache_dev); if (r) { *error = "Error opening cache device"; return r; @@ -1663,20 +2099,21 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as, static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, char **error) { + sector_t origin_sectors; int r; if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->origin_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->origin_dev); if (r) { *error = "Error opening origin device"; return r; } - ca->origin_sectors = get_dev_size(ca->origin_dev); - if (ca->ti->len > ca->origin_sectors) { + origin_sectors = get_dev_size(ca->origin_dev); + if (ca->ti->len > origin_sectors) { *error = "Device size larger than cached device"; return -EINVAL; } @@ -1687,24 +2124,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as, char **error) { - unsigned long tmp; + unsigned long block_size; if (!at_least_one_arg(as, error)) return -EINVAL; - if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp || - tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || - tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { + if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size || + block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || + block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS || + block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { *error = "Invalid data block size"; return -EINVAL; } - if (tmp > ca->cache_sectors) { + if (block_size > ca->cache_sectors) { *error = "Data block size is larger than the cache device"; return -EINVAL; } - ca->block_size = tmp; + ca->block_size = block_size; return 0; } @@ -1712,18 +2150,20 @@ static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as, static void init_features(struct cache_features *cf) { cf->mode = CM_WRITE; - cf->write_through = false; + cf->io_mode = CM_IO_WRITEBACK; + cf->metadata_version = 1; + cf->discard_passdown = true; } static int parse_features(struct cache_args *ca, struct dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { - {0, 1, "Invalid number of cache feature arguments"}, + static const struct dm_arg _args[] = { + {0, 3, "Invalid number of cache feature arguments"}, }; - int r; - unsigned argc; + int r, mode_ctr = 0; + unsigned int argc; const char *arg; struct cache_features *cf = &ca->features; @@ -1736,11 +2176,26 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, while (argc--) { arg = dm_shift_arg(as); - if (!strcasecmp(arg, "writeback")) - cf->write_through = false; + if (!strcasecmp(arg, "writeback")) { + cf->io_mode = CM_IO_WRITEBACK; + mode_ctr++; + } + + else if (!strcasecmp(arg, "writethrough")) { + cf->io_mode = CM_IO_WRITETHROUGH; + mode_ctr++; + } + + else if (!strcasecmp(arg, "passthrough")) { + cf->io_mode = CM_IO_PASSTHROUGH; + mode_ctr++; + } - else if (!strcasecmp(arg, "writethrough")) - cf->write_through = true; + else if (!strcasecmp(arg, "metadata2")) + cf->metadata_version = 2; + + else if (!strcasecmp(arg, "no_discard_passdown")) + cf->discard_passdown = false; else { *error = "Unrecognised cache feature requested"; @@ -1748,13 +2203,18 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, } } + if (mode_ctr > 1) { + *error = "Duplicate cache io_mode features requested"; + return -EINVAL; + } + return 0; } static int parse_policy(struct cache_args *ca, struct 
dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "Invalid number of policy arguments"}, }; @@ -1813,7 +2273,7 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv, /*----------------------------------------------------------------*/ -static struct kmem_cache *migration_cache; +static struct kmem_cache *migration_cache = NULL; #define NOT_CORE_OPTION 1 @@ -1869,22 +2329,23 @@ static int set_config_values(struct cache *cache, int argc, const char **argv) static int create_cache_policy(struct cache *cache, struct cache_args *ca, char **error) { - cache->policy = dm_cache_policy_create(ca->policy_name, - cache->cache_size, - cache->origin_sectors, - cache->sectors_per_block); - if (!cache->policy) { + struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name, + cache->cache_size, + cache->origin_sectors, + cache->sectors_per_block); + if (IS_ERR(p)) { *error = "Error creating cache's policy"; - return -ENOMEM; + return PTR_ERR(p); } + cache->policy = p; + BUG_ON(!cache->policy); return 0; } /* - * We want the discard block size to be a power of two, at least the size - * of the cache block size, and have no more than 2^14 discard blocks - * across the origin. + * We want the discard block size to be at least the size of the cache + * block size and have no more than 2^14 discard blocks across the origin. */ #define MAX_DISCARD_BLOCKS (1 << 14) @@ -1899,9 +2360,7 @@ static bool too_many_discard_blocks(sector_t discard_block_size, static sector_t calculate_discard_block_size(sector_t cache_block_size, sector_t origin_size) { - sector_t discard_block_size; - - discard_block_size = roundup_pow_of_two(cache_block_size); + sector_t discard_block_size = cache_block_size; if (origin_size) while (too_many_discard_blocks(discard_block_size, origin_size)) @@ -1910,6 +2369,19 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, return discard_block_size; } +static void set_cache_size(struct cache *cache, dm_cblock_t size) +{ + dm_block_t nr_blocks = from_cblock(size); + + if (nr_blocks > (1 << 20) && cache->cache_size != size) + DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n" + "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n" + "Please consider increasing the cache block size to reduce the overall cache block count.", + (unsigned long long) nr_blocks); + + cache->cache_size = size; +} + #define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) @@ -1928,18 +2400,22 @@ static int cache_create(struct cache_args *ca, struct cache **result) cache->ti = ca->ti; ti->private = cache; + ti->accounts_remapped_io = true; ti->num_flush_bios = 2; ti->flush_supported = true; ti->num_discard_bios = 1; ti->discards_supported = true; - ti->discard_zeroes_data_unsupported = true; - cache->features = ca->features; - ti->per_bio_data_size = get_per_bio_data_size(cache); + ti->per_io_data_size = sizeof(struct per_bio_data); - cache->callbacks.congested_fn = cache_is_congested; - dm_table_add_target_callbacks(ti->table, &cache->callbacks); + cache->features = ca->features; + if (writethrough_mode(cache)) { + /* Create bioset for writethrough bios issued to origin */ + r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0); + if (r) + goto bad; + } cache->metadata_dev = ca->metadata_dev; cache->origin_dev = ca->origin_dev; @@ -1947,8 +2423,7 @@ static int 
cache_create(struct cache_args *ca, struct cache **result) ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL; - /* FIXME: factor out this whole section */ - origin_blocks = cache->origin_sectors = ca->origin_sectors; + origin_blocks = cache->origin_sectors = ti->len; origin_blocks = block_div(origin_blocks, ca->block_size); cache->origin_blocks = to_oblock(origin_blocks); @@ -1963,10 +2438,10 @@ static int cache_create(struct cache_args *ca, struct cache **result) cache->sectors_per_block_shift = -1; cache_size = block_div(cache_size, ca->block_size); - cache->cache_size = to_cblock(cache_size); + set_cache_size(cache, to_cblock(cache_size)); } else { cache->sectors_per_block_shift = __ffs(ca->block_size); - cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift); + set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); } r = create_cache_policy(cache, ca, error); @@ -1984,26 +2459,47 @@ static int cache_create(struct cache_args *ca, struct cache **result) cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, ca->block_size, may_format, - dm_cache_policy_get_hint_size(cache->policy)); + dm_cache_policy_get_hint_size(cache->policy), + ca->features.metadata_version); if (IS_ERR(cmd)) { *error = "Error creating metadata object"; r = PTR_ERR(cmd); goto bad; } cache->cmd = cmd; + set_cache_mode(cache, CM_WRITE); + if (get_cache_mode(cache) != CM_WRITE) { + *error = "Unable to get write access to metadata, please check/repair metadata."; + r = -EINVAL; + goto bad; + } + + if (passthrough_mode(cache)) { + bool all_clean; + + r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); + if (r) { + *error = "dm_cache_metadata_all_clean() failed"; + goto bad; + } + + if (!all_clean) { + *error = "Cannot enter passthrough mode unless all blocks are clean"; + r = -EINVAL; + goto bad; + } + + policy_allow_migrations(cache->policy, false); + } spin_lock_init(&cache->lock); bio_list_init(&cache->deferred_bios); - bio_list_init(&cache->deferred_flush_bios); - bio_list_init(&cache->deferred_writethrough_bios); - INIT_LIST_HEAD(&cache->quiesced_migrations); - INIT_LIST_HEAD(&cache->completed_migrations); - INIT_LIST_HEAD(&cache->need_commit_migrations); - atomic_set(&cache->nr_migrations, 0); + atomic_set(&cache->nr_allocated_migrations, 0); + atomic_set(&cache->nr_io_migrations, 0); init_waitqueue_head(&cache->migration_wait); r = -ENOMEM; - cache->nr_dirty = 0; + atomic_set(&cache->nr_dirty, 0); cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); if (!cache->dirty_bitset) { *error = "could not allocate dirty bitset"; @@ -2014,7 +2510,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) cache->discard_block_size = calculate_discard_block_size(cache->sectors_per_block, cache->origin_sectors); - cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); + cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, + cache->discard_block_size)); cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); if (!cache->discard_bitset) { *error = "could not allocate discard bitset"; @@ -2022,6 +2519,13 @@ static int cache_create(struct cache_args *ca, struct cache **result) } clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); + cache->invalid_bitset = alloc_bitset(from_cblock(cache->cache_size)); + if (!cache->invalid_bitset) { + *error = "could not allocate bitset for invalid blocks"; + goto bad; + } + clear_bitset(cache->invalid_bitset, 
from_cblock(cache->cache_size)); + cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(cache->copier)) { *error = "could not create kcopyd client"; @@ -2029,39 +2533,31 @@ static int cache_create(struct cache_args *ca, struct cache **result) goto bad; } - cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); + cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0); if (!cache->wq) { *error = "could not create workqueue for metadata object"; goto bad; } - INIT_WORK(&cache->worker, do_worker); + INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios); + INIT_WORK(&cache->migration_worker, check_migrations); INIT_DELAYED_WORK(&cache->waker, do_waker); - cache->last_commit_jiffies = jiffies; - cache->prison = dm_bio_prison_create(PRISON_CELLS); + cache->prison = dm_bio_prison_create_v2(cache->wq); if (!cache->prison) { *error = "could not create bio prison"; goto bad; } - cache->all_io_ds = dm_deferred_set_create(); - if (!cache->all_io_ds) { - *error = "could not create all_io deferred set"; - goto bad; - } - - cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE, - migration_cache); - if (!cache->migration_pool) { + r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE, + migration_cache); + if (r) { *error = "Error creating cache's migration mempool"; goto bad; } - cache->next_migration = NULL; - cache->need_tick_bio = true; cache->sized = false; - cache->quiescing = false; + cache->invalidate = false; cache->commit_requested = false; cache->loaded_mappings = false; cache->loaded_discards = false; @@ -2075,17 +2571,26 @@ static int cache_create(struct cache_args *ca, struct cache **result) atomic_set(&cache->stats.commit_count, 0); atomic_set(&cache->stats.discard_count, 0); + spin_lock_init(&cache->invalidation_lock); + INIT_LIST_HEAD(&cache->invalidation_requests); + + batcher_init(&cache->committer, commit_op, cache, + issue_op, cache, cache->wq); + dm_iot_init(&cache->tracker); + + init_rwsem(&cache->background_work_lock); + prevent_background_work(cache); + *result = cache; return 0; - bad: - destroy(cache); + __destroy(cache); return r; } static int copy_ctr_args(struct cache *cache, int argc, const char **argv) { - unsigned i; + unsigned int i; const char **copy; copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL); @@ -2107,7 +2612,7 @@ static int copy_ctr_args(struct cache *cache, int argc, const char **argv) return 0; } -static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv) +static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv) { int r = -EINVAL; struct cache_args *ca; @@ -2130,198 +2635,125 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv) r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); if (r) { - destroy(cache); + __destroy(cache); goto out; } ti->private = cache; - out: destroy_cache_args(ca); return r; } +/*----------------------------------------------------------------*/ + static int cache_map(struct dm_target *ti, struct bio *bio) { struct cache *cache = ti->private; int r; + bool commit_needed; dm_oblock_t block = get_bio_block(cache, bio); - size_t pb_data_size = get_per_bio_data_size(cache); - bool can_migrate = false; - bool discarded_block; - struct dm_bio_prison_cell *cell; - struct policy_result lookup_result; - struct per_bio_data *pb; - if (from_oblock(block) > from_oblock(cache->origin_blocks)) { + init_per_bio_data(bio); + if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { /* * 
This can only occur if the io goes to a partial block at * the end of the origin device. We don't cache these. * Just remap to the origin and carry on. */ - remap_to_origin_clear_discard(cache, bio, block); + remap_to_origin(cache, bio); + accounted_begin(cache, bio); return DM_MAPIO_REMAPPED; } - pb = init_per_bio_data(bio, pb_data_size); - - if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { + if (discard_or_flush(bio)) { defer_bio(cache, bio); return DM_MAPIO_SUBMITTED; } - /* - * Check to see if that block is currently migrating. - */ - cell = alloc_prison_cell(cache); - if (!cell) { - defer_bio(cache, bio); - return DM_MAPIO_SUBMITTED; - } - - r = bio_detain(cache, block, bio, cell, - (cell_free_fn) free_prison_cell, - cache, &cell); - if (r) { - if (r < 0) - defer_bio(cache, bio); - - return DM_MAPIO_SUBMITTED; - } - - discarded_block = is_discarded_oblock(cache, block); - - r = policy_map(cache->policy, block, false, can_migrate, discarded_block, - bio, &lookup_result); - if (r == -EWOULDBLOCK) { - cell_defer(cache, cell, true); - return DM_MAPIO_SUBMITTED; + r = map_bio(cache, bio, block, &commit_needed); + if (commit_needed) + schedule_commit(&cache->committer); - } else if (r) { - DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); - bio_io_error(bio); - return DM_MAPIO_SUBMITTED; - } - - switch (lookup_result.op) { - case POLICY_HIT: - inc_hit_counter(cache, bio); - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - - if (is_writethrough_io(cache, bio, lookup_result.cblock)) - remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); - else - remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); - - cell_defer(cache, cell, false); - break; - - case POLICY_MISS: - inc_miss_counter(cache, bio); - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - - if (pb->req_nr != 0) { - /* - * This is a duplicate writethrough io that is no - * longer needed because the block has been demoted. 
- */ - bio_endio(bio, 0); - cell_defer(cache, cell, false); - return DM_MAPIO_SUBMITTED; - } else { - remap_to_origin_clear_discard(cache, bio, block); - cell_defer(cache, cell, false); - } - break; - - default: - DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, - (unsigned) lookup_result.op); - bio_io_error(bio); - return DM_MAPIO_SUBMITTED; - } - - return DM_MAPIO_REMAPPED; + return r; } -static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) +static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) { struct cache *cache = ti->private; unsigned long flags; - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + struct per_bio_data *pb = get_per_bio_data(bio); if (pb->tick) { - policy_tick(cache->policy); + policy_tick(cache->policy, false); spin_lock_irqsave(&cache->lock, flags); cache->need_tick_bio = true; spin_unlock_irqrestore(&cache->lock, flags); } - check_for_quiesced_migrations(cache, pb); + bio_drop_shared_lock(cache, bio); + accounted_complete(cache, bio); - return 0; + return DM_ENDIO_DONE; } static int write_dirty_bitset(struct cache *cache) { - unsigned i, r; + int r; - for (i = 0; i < from_cblock(cache->cache_size); i++) { - r = dm_cache_set_dirty(cache->cmd, to_cblock(i), - is_dirty(cache, to_cblock(i))); - if (r) - return r; - } + if (get_cache_mode(cache) >= CM_READ_ONLY) + return -EINVAL; - return 0; + r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset); + if (r) + metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r); + + return r; } static int write_discard_bitset(struct cache *cache) { - unsigned i, r; + unsigned int i, r; + + if (get_cache_mode(cache) >= CM_READ_ONLY) + return -EINVAL; r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, cache->discard_nr_blocks); if (r) { - DMERR("could not resize on-disk discard bitset"); + DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache)); + metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r); return r; } for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { r = dm_cache_set_discard(cache->cmd, to_dblock(i), is_discarded(cache, to_dblock(i))); - if (r) + if (r) { + metadata_operation_failed(cache, "dm_cache_set_discard", r); return r; + } } return 0; } -static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, - uint32_t hint) -{ - struct cache *cache = context; - return dm_cache_save_hint(cache->cmd, cblock, hint); -} - static int write_hints(struct cache *cache) { int r; - r = dm_cache_begin_hints(cache->cmd, cache->policy); + if (get_cache_mode(cache) >= CM_READ_ONLY) + return -EINVAL; + + r = dm_cache_write_hints(cache->cmd, cache->policy); if (r) { - DMERR("dm_cache_begin_hints failed"); + metadata_operation_failed(cache, "dm_cache_write_hints", r); return r; } - r = policy_walk_mappings(cache->policy, save_hint, cache); - if (r) - DMERR("policy_walk_mappings failed"); - - return r; + return 0; } /* @@ -2333,26 +2765,26 @@ static bool sync_metadata(struct cache *cache) r1 = write_dirty_bitset(cache); if (r1) - DMERR("could not write dirty bitset"); + DMERR("%s: could not write dirty bitset", cache_device_name(cache)); r2 = write_discard_bitset(cache); if (r2) - DMERR("could not write discard bitset"); + DMERR("%s: could not write discard bitset", cache_device_name(cache)); save_stats(cache); r3 = write_hints(cache); if (r3) - DMERR("could not write hints"); + 
DMERR("%s: could not write hints", cache_device_name(cache)); /* * If writing the above metadata failed, we still commit, but don't * set the clean shutdown flag. This will effectively force every * dirty bit to be set on reload. */ - r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3); + r4 = commit(cache, !r1 && !r2 && !r3); if (r4) - DMERR("could not write cache metadata. Data loss may occur."); + DMERR("%s: could not write cache metadata", cache_device_name(cache)); return !r1 && !r2 && !r3 && !r4; } @@ -2361,44 +2793,223 @@ static void cache_postsuspend(struct dm_target *ti) { struct cache *cache = ti->private; - start_quiescing(cache); - wait_for_migrations(cache); - stop_worker(cache); - requeue_deferred_io(cache); - stop_quiescing(cache); + prevent_background_work(cache); + BUG_ON(atomic_read(&cache->nr_io_migrations)); + + cancel_delayed_work_sync(&cache->waker); + drain_workqueue(cache->wq); + WARN_ON(cache->tracker.in_flight); - (void) sync_metadata(cache); + /* + * If it's a flush suspend there won't be any deferred bios, so this + * call is harmless. + */ + requeue_deferred_bios(cache); + + if (get_cache_mode(cache) == CM_WRITE) + (void) sync_metadata(cache); } static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, bool dirty, uint32_t hint, bool hint_valid) { - int r; struct cache *cache = context; - r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); - if (r) - return r; + if (dirty) { + set_bit(from_cblock(cblock), cache->dirty_bitset); + atomic_inc(&cache->nr_dirty); + } else + clear_bit(from_cblock(cblock), cache->dirty_bitset); - if (dirty) - set_dirty(cache, oblock, cblock); - else - clear_dirty(cache, oblock, cblock); + return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid); +} - return 0; +static int load_filtered_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, + bool dirty, uint32_t hint, bool hint_valid) +{ + struct cache *cache = context; + + if (from_oblock(oblock) >= from_oblock(cache->origin_blocks)) { + if (dirty) { + DMERR("%s: unable to shrink origin; cache block %u is dirty", + cache_device_name(cache), from_cblock(cblock)); + return -EFBIG; + } + set_bit(from_cblock(cblock), cache->invalid_bitset); + return 0; + } + + return load_mapping(context, oblock, cblock, dirty, hint, hint_valid); +} + +/* + * The discard block size in the on disk metadata is not + * necessarily the same as we're currently using. So we have to + * be careful to only set the discarded attribute if we know it + * covers a complete block of the new size. + */ +struct discard_load_info { + struct cache *cache; + + /* + * These blocks are sized using the on disk dblock size, rather + * than the current one. + */ + dm_block_t block_size; + dm_block_t discard_begin, discard_end; +}; + +static void discard_load_info_init(struct cache *cache, + struct discard_load_info *li) +{ + li->cache = cache; + li->discard_begin = li->discard_end = 0; +} + +static void set_discard_range(struct discard_load_info *li) +{ + sector_t b, e; + + if (li->discard_begin == li->discard_end) + return; + + /* + * Convert to sectors. + */ + b = li->discard_begin * li->block_size; + e = li->discard_end * li->block_size; + + /* + * Then convert back to the current dblock size. + */ + b = dm_sector_div_up(b, li->cache->discard_block_size); + sector_div(e, li->cache->discard_block_size); + + /* + * The origin may have shrunk, so we need to check we're still in + * bounds. 
+ */ + if (e > from_dblock(li->cache->discard_nr_blocks)) + e = from_dblock(li->cache->discard_nr_blocks); + + for (; b < e; b++) + set_discard(li->cache, to_dblock(b)); } static int load_discard(void *context, sector_t discard_block_size, dm_dblock_t dblock, bool discard) { - struct cache *cache = context; + struct discard_load_info *li = context; - /* FIXME: handle mis-matched block size */ + li->block_size = discard_block_size; - if (discard) - set_discard(cache, dblock); - else - clear_discard(cache, dblock); + if (discard) { + if (from_dblock(dblock) == li->discard_end) + /* + * We're already in a discard range, just extend it. + */ + li->discard_end = li->discard_end + 1ULL; + + else { + /* + * Emit the old range and start a new one. + */ + set_discard_range(li); + li->discard_begin = from_dblock(dblock); + li->discard_end = li->discard_begin + 1ULL; + } + } else { + set_discard_range(li); + li->discard_begin = li->discard_end = 0; + } + + return 0; +} + +static dm_cblock_t get_cache_dev_size(struct cache *cache) +{ + sector_t size = get_dev_size(cache->cache_dev); + (void) sector_div(size, cache->sectors_per_block); + return to_cblock(size); +} + +static bool can_resume(struct cache *cache) +{ + /* + * Disallow retrying the resume operation for devices that failed the + * first resume attempt, as the failure leaves the policy object partially + * initialized. Retrying could trigger BUG_ON when loading cache mappings + * into the incomplete policy object. + */ + if (cache->sized && !cache->loaded_mappings) { + if (get_cache_mode(cache) != CM_WRITE) + DMERR("%s: unable to resume a failed-loaded cache, please check metadata.", + cache_device_name(cache)); + else + DMERR("%s: unable to resume cache due to missing proper cache table reload", + cache_device_name(cache)); + return false; + } + + return true; +} + +static bool can_resize(struct cache *cache, dm_cblock_t new_size) +{ + if (from_cblock(new_size) > from_cblock(cache->cache_size)) { + DMERR("%s: unable to extend cache due to missing cache table reload", + cache_device_name(cache)); + return false; + } + + /* + * We can't drop a dirty block when shrinking the cache. 
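+	 *
+	 * find_next_bit() below returns the first dirty cblock at or
+	 * beyond the requested new end, or cache->cache_size if there
+	 * is none; only the latter lets the shrink go ahead.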
+ */ + if (cache->loaded_mappings) { + new_size = to_cblock(find_next_bit(cache->dirty_bitset, + from_cblock(cache->cache_size), + from_cblock(new_size))); + if (new_size != cache->cache_size) { + DMERR("%s: unable to shrink cache; cache block %llu is dirty", + cache_device_name(cache), + (unsigned long long) from_cblock(new_size)); + return false; + } + } + + return true; +} + +static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) +{ + int r; + + r = dm_cache_resize(cache->cmd, new_size); + if (r) { + DMERR("%s: could not resize cache metadata", cache_device_name(cache)); + metadata_operation_failed(cache, "dm_cache_resize", r); + return r; + } + + set_cache_size(cache, new_size); + + return 0; +} + +static int truncate_oblocks(struct cache *cache) +{ + uint32_t nr_blocks = from_cblock(cache->cache_size); + uint32_t i; + int r; + + for_each_set_bit(i, cache->invalid_bitset, nr_blocks) { + r = dm_cache_remove_mapping(cache->cmd, to_cblock(i)); + if (r) { + DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata", + cache_device_name(cache)); + return r; + } + } return 0; } @@ -2407,29 +3018,45 @@ static int cache_preresume(struct dm_target *ti) { int r = 0; struct cache *cache = ti->private; - sector_t actual_cache_size = get_dev_size(cache->cache_dev); - (void) sector_div(actual_cache_size, cache->sectors_per_block); + dm_cblock_t csize = get_cache_dev_size(cache); + + if (!can_resume(cache)) + return -EINVAL; /* * Check to see if the cache has resized. */ - if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) { - cache->cache_size = to_cblock(actual_cache_size); + if (!cache->sized || csize != cache->cache_size) { + if (!can_resize(cache, csize)) + return -EINVAL; - r = dm_cache_resize(cache->cmd, cache->cache_size); - if (r) { - DMERR("could not resize cache metadata"); + r = resize_cache_dev(cache, csize); + if (r) return r; - } cache->sized = true; } if (!cache->loaded_mappings) { + /* + * The fast device could have been resized since the last + * failed preresume attempt. To be safe we start by a blank + * bitset for cache blocks. + */ + clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size)); + r = dm_cache_load_mappings(cache->cmd, cache->policy, - load_mapping, cache); + load_filtered_mapping, cache); + if (r) { + DMERR("%s: could not load cache mappings", cache_device_name(cache)); + if (r != -EFBIG) + metadata_operation_failed(cache, "dm_cache_load_mappings", r); + return r; + } + + r = truncate_oblocks(cache); if (r) { - DMERR("could not load cache mappings"); + metadata_operation_failed(cache, "dm_cache_remove_mapping", r); return r; } @@ -2437,11 +3064,23 @@ static int cache_preresume(struct dm_target *ti) } if (!cache->loaded_discards) { - r = dm_cache_load_discards(cache->cmd, load_discard, cache); + struct discard_load_info li; + + /* + * The discard bitset could have been resized, or the + * discard block size changed. To be safe we start by + * setting every dblock to not discarded. 
+ */ + clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); + + discard_load_info_init(cache, &li); + r = dm_cache_load_discards(cache->cmd, load_discard, &li); if (r) { - DMERR("could not load origin discards"); + DMERR("%s: could not load origin discards", cache_device_name(cache)); + metadata_operation_failed(cache, "dm_cache_load_discards", r); return r; } + set_discard_range(&li); cache->loaded_discards = true; } @@ -2454,79 +3093,133 @@ static void cache_resume(struct dm_target *ti) struct cache *cache = ti->private; cache->need_tick_bio = true; + allow_background_work(cache); do_waker(&cache->waker.work); } +static void emit_flags(struct cache *cache, char *result, + unsigned int maxlen, ssize_t *sz_ptr) +{ + ssize_t sz = *sz_ptr; + struct cache_features *cf = &cache->features; + unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1; + + DMEMIT("%u ", count); + + if (cf->metadata_version == 2) + DMEMIT("metadata2 "); + + if (writethrough_mode(cache)) + DMEMIT("writethrough "); + + else if (passthrough_mode(cache)) + DMEMIT("passthrough "); + + else if (writeback_mode(cache)) + DMEMIT("writeback "); + + else { + DMEMIT("unknown "); + DMERR("%s: internal error: unknown io mode: %d", + cache_device_name(cache), (int) cf->io_mode); + } + + if (!cf->discard_passdown) + DMEMIT("no_discard_passdown "); + + *sz_ptr = sz; +} + /* * Status format: * - * <#used metadata blocks>/<#total metadata blocks> + * <metadata block size> <#used metadata blocks>/<#total metadata blocks> + * <cache block size> <#used cache blocks>/<#total cache blocks> * <#read hits> <#read misses> <#write hits> <#write misses> - * <#demotions> <#promotions> <#blocks in cache> <#dirty> + * <#demotions> <#promotions> <#dirty> * <#features> <features>* * <#core args> <core args> - * <#policy args> <policy args>* + * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check> */ static void cache_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) + unsigned int status_flags, char *result, unsigned int maxlen) { int r = 0; - unsigned i; + unsigned int i; ssize_t sz = 0; dm_block_t nr_free_blocks_metadata = 0; dm_block_t nr_blocks_metadata = 0; char buf[BDEVNAME_SIZE]; struct cache *cache = ti->private; dm_cblock_t residency; + bool needs_check; switch (type) { case STATUSTYPE_INFO: - /* Commit to ensure statistics aren't out-of-date */ - if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) { - r = dm_cache_commit(cache->cmd, false); - if (r) - DMERR("could not commit metadata for accurate status"); + if (get_cache_mode(cache) == CM_FAIL) { + DMEMIT("Fail"); + break; } - r = dm_cache_get_free_metadata_block_count(cache->cmd, - &nr_free_blocks_metadata); + /* Commit to ensure statistics aren't out-of-date */ + if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) + (void) commit(cache, false); + + r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata); if (r) { - DMERR("could not get metadata free block count"); + DMERR("%s: dm_cache_get_free_metadata_block_count returned %d", + cache_device_name(cache), r); goto err; } r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); if (r) { - DMERR("could not get metadata device size"); + DMERR("%s: dm_cache_get_metadata_dev_size returned %d", + cache_device_name(cache), r); goto err; } residency = policy_residency(cache->policy); - DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ", + DMEMIT("%u %llu/%llu 
%llu %llu/%llu %u %u %u %u %u %u %lu ", + (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE, (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), (unsigned long long)nr_blocks_metadata, - (unsigned) atomic_read(&cache->stats.read_hit), - (unsigned) atomic_read(&cache->stats.read_miss), - (unsigned) atomic_read(&cache->stats.write_hit), - (unsigned) atomic_read(&cache->stats.write_miss), - (unsigned) atomic_read(&cache->stats.demotion), - (unsigned) atomic_read(&cache->stats.promotion), + (unsigned long long)cache->sectors_per_block, (unsigned long long) from_cblock(residency), - cache->nr_dirty); + (unsigned long long) from_cblock(cache->cache_size), + (unsigned int) atomic_read(&cache->stats.read_hit), + (unsigned int) atomic_read(&cache->stats.read_miss), + (unsigned int) atomic_read(&cache->stats.write_hit), + (unsigned int) atomic_read(&cache->stats.write_miss), + (unsigned int) atomic_read(&cache->stats.demotion), + (unsigned int) atomic_read(&cache->stats.promotion), + (unsigned long) atomic_read(&cache->nr_dirty)); - if (cache->features.write_through) - DMEMIT("1 writethrough "); - else - DMEMIT("0 "); + emit_flags(cache, result, maxlen, &sz); DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); + + DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); if (sz < maxlen) { - r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz); + r = policy_emit_config_values(cache->policy, result, maxlen, &sz); if (r) - DMERR("policy_emit_config_values returned %d", r); + DMERR("%s: policy_emit_config_values returned %d", + cache_device_name(cache), r); } + if (get_cache_mode(cache) == CM_READ_ONLY) + DMEMIT("ro "); + else + DMEMIT("rw "); + + r = dm_cache_metadata_needs_check(cache->cmd, &needs_check); + + if (r || needs_check) + DMEMIT("needs_check "); + else + DMEMIT("- "); + break; case STATUSTYPE_TABLE: @@ -2541,6 +3234,30 @@ static void cache_status(struct dm_target *ti, status_type_t type, DMEMIT(" %s", cache->ctr_args[i]); if (cache->nr_ctr_args) DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); + break; + + case STATUSTYPE_IMA: + DMEMIT_TARGET_NAME_VERSION(ti->type); + if (get_cache_mode(cache) == CM_FAIL) + DMEMIT(",metadata_mode=fail"); + else if (get_cache_mode(cache) == CM_READ_ONLY) + DMEMIT(",metadata_mode=ro"); + else + DMEMIT(",metadata_mode=rw"); + + format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); + DMEMIT(",cache_metadata_device=%s", buf); + format_dev_t(buf, cache->cache_dev->bdev->bd_dev); + DMEMIT(",cache_device=%s", buf); + format_dev_t(buf, cache->origin_dev->bdev->bd_dev); + DMEMIT(",cache_origin_device=%s", buf); + DMEMIT(",writethrough=%c", writethrough_mode(cache) ? 'y' : 'n'); + DMEMIT(",writeback=%c", writeback_mode(cache) ? 'y' : 'n'); + DMEMIT(",passthrough=%c", passthrough_mode(cache) ? 'y' : 'n'); + DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n'); + DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y'); + DMEMIT(";"); + break; } return; @@ -2550,14 +3267,165 @@ err: } /* - * Supports <key> <value>. + * Defines a range of cblocks, begin to (end - 1) are in the range. end is + * the one-past-the-end value. + */ +struct cblock_range { + dm_cblock_t begin; + dm_cblock_t end; +}; + +/* + * A cache block range can take two forms: + * + * i) A single cblock, eg. '3456' + * ii) A begin and end cblock with a dash between, eg. 
123-234 + */ +static int parse_cblock_range(struct cache *cache, const char *str, + struct cblock_range *result) +{ + char dummy; + uint64_t b, e; + int r; + + /* + * Try and parse form (ii) first. + */ + r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy); + + if (r == 2) { + result->begin = to_cblock(b); + result->end = to_cblock(e); + return 0; + } + + /* + * That didn't work, try form (i). + */ + r = sscanf(str, "%llu%c", &b, &dummy); + + if (r == 1) { + result->begin = to_cblock(b); + result->end = to_cblock(from_cblock(result->begin) + 1u); + return 0; + } + + DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str); + return -EINVAL; +} + +static int validate_cblock_range(struct cache *cache, struct cblock_range *range) +{ + uint64_t b = from_cblock(range->begin); + uint64_t e = from_cblock(range->end); + uint64_t n = from_cblock(cache->cache_size); + + if (b >= n) { + DMERR("%s: begin cblock out of range: %llu >= %llu", + cache_device_name(cache), b, n); + return -EINVAL; + } + + if (e > n) { + DMERR("%s: end cblock out of range: %llu > %llu", + cache_device_name(cache), e, n); + return -EINVAL; + } + + if (b >= e) { + DMERR("%s: invalid cblock range: %llu >= %llu", + cache_device_name(cache), b, e); + return -EINVAL; + } + + return 0; +} + +static inline dm_cblock_t cblock_succ(dm_cblock_t b) +{ + return to_cblock(from_cblock(b) + 1); +} + +static int request_invalidation(struct cache *cache, struct cblock_range *range) +{ + int r = 0; + + /* + * We don't need to do any locking here because we know we're in + * passthrough mode. There's is potential for a race between an + * invalidation triggered by an io and an invalidation message. This + * is harmless, we must not worry if the policy call fails. + */ + while (range->begin != range->end) { + r = invalidate_cblock(cache, range->begin); + if (r) + return r; + + range->begin = cblock_succ(range->begin); + } + + cache->commit_requested = true; + return r; +} + +static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count, + const char **cblock_ranges) +{ + int r = 0; + unsigned int i; + struct cblock_range range; + + if (!passthrough_mode(cache)) { + DMERR("%s: cache has to be in passthrough mode for invalidation", + cache_device_name(cache)); + return -EPERM; + } + + for (i = 0; i < count; i++) { + r = parse_cblock_range(cache, cblock_ranges[i], &range); + if (r) + break; + + r = validate_cblock_range(cache, &range); + if (r) + break; + + /* + * Pass begin and end origin blocks to the worker and wake it. + */ + r = request_invalidation(cache, &range); + if (r) + break; + } + + return r; +} + +/* + * Supports + * "<key> <value>" + * and + * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]* * * The key migration_threshold is supported by the cache target core. 
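+ *
+ * Invalidation requests are sent via the usual message mechanism,
+ * e.g. (device name and cblock numbers are illustrative only):
+ *
+ *   dmsetup message my_cache 0 invalidate_cblocks 2345 3456-4567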
*/ -static int cache_message(struct dm_target *ti, unsigned argc, char **argv) +static int cache_message(struct dm_target *ti, unsigned int argc, char **argv, + char *result, unsigned int maxlen) { struct cache *cache = ti->private; + if (!argc) + return -EINVAL; + + if (get_cache_mode(cache) >= CM_READ_ONLY) { + DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode", + cache_device_name(cache)); + return -EOPNOTSUPP; + } + + if (!strcasecmp(argv[0], "invalidate_cblocks")) + return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); + if (argc != 2) return -EINVAL; @@ -2578,40 +3446,69 @@ static int cache_iterate_devices(struct dm_target *ti, } /* - * We assume I/O is going to the origin (which is the volume - * more likely to have restrictions e.g. by being striped). - * (Looking up the exact location of the data would be expensive - * and could always be out of date by the time the bio is submitted.) + * If discard_passdown was enabled verify that the origin device + * supports discards. Disable discard_passdown if not. */ -static int cache_bvec_merge(struct dm_target *ti, - struct bvec_merge_data *bvm, - struct bio_vec *biovec, int max_size) +static void disable_passdown_if_not_supported(struct cache *cache) { - struct cache *cache = ti->private; - struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev); + struct block_device *origin_bdev = cache->origin_dev->bdev; + struct queue_limits *origin_limits = bdev_limits(origin_bdev); + const char *reason = NULL; + + if (!cache->features.discard_passdown) + return; - if (!q->merge_bvec_fn) - return max_size; + if (!bdev_max_discard_sectors(origin_bdev)) + reason = "discard unsupported"; - bvm->bi_bdev = cache->origin_dev->bdev; - return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); + else if (origin_limits->max_discard_sectors < cache->sectors_per_block) + reason = "max discard sectors smaller than a block"; + + if (reason) { + DMWARN("Origin device (%pg) %s: Disabling discard passdown.", + origin_bdev, reason); + cache->features.discard_passdown = false; + } } static void set_discard_limits(struct cache *cache, struct queue_limits *limits) { + struct block_device *origin_bdev = cache->origin_dev->bdev; + struct queue_limits *origin_limits = bdev_limits(origin_bdev); + + if (!cache->features.discard_passdown) { + /* No passdown is done so setting own virtual limits */ + limits->max_hw_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, + cache->origin_sectors); + limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; + return; + } + /* - * FIXME: these limits may be incompatible with the cache device + * cache_iterate_devices() is stacking both origin and fast device limits + * but discards aren't passed to fast device, so inherit origin's limits. 
*/ - limits->max_discard_sectors = cache->discard_block_size * 1024; - limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; + limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors; + limits->discard_granularity = origin_limits->discard_granularity; + limits->discard_alignment = origin_limits->discard_alignment; } static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct cache *cache = ti->private; + uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; - blk_limits_io_min(limits, 0); - blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); + /* + * If the system-determined stacked limits are compatible with the + * cache's blocksize (io_opt is a factor) do not override them. + */ + if (io_opt_sectors < cache->sectors_per_block || + do_div(io_opt_sectors, cache->sectors_per_block)) { + limits->io_min = cache->sectors_per_block << SECTOR_SHIFT; + limits->io_opt = cache->sectors_per_block << SECTOR_SHIFT; + } + + disable_passdown_if_not_supported(cache); set_discard_limits(cache, limits); } @@ -2619,7 +3516,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 1, 1}, + .version = {2, 3, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, @@ -2631,7 +3528,6 @@ static struct target_type cache_target = { .status = cache_status, .message = cache_message, .iterate_devices = cache_iterate_devices, - .merge = cache_bvec_merge, .io_hints = cache_io_hints, }; @@ -2639,25 +3535,37 @@ static int __init dm_cache_init(void) { int r; - r = dm_register_target(&cache_target); - if (r) { - DMERR("cache target registration failed: %d", r); - return r; - } - migration_cache = KMEM_CACHE(dm_cache_migration, 0); if (!migration_cache) { - dm_unregister_target(&cache_target); - return -ENOMEM; + r = -ENOMEM; + goto err; + } + + btracker_work_cache = kmem_cache_create("dm_cache_bt_work", + sizeof(struct bt_work), __alignof__(struct bt_work), 0, NULL); + if (!btracker_work_cache) { + r = -ENOMEM; + goto err; + } + + r = dm_register_target(&cache_target); + if (r) { + goto err; } return 0; + +err: + kmem_cache_destroy(migration_cache); + kmem_cache_destroy(btracker_work_cache); + return r; } static void __exit dm_cache_exit(void) { dm_unregister_target(&cache_target); kmem_cache_destroy(migration_cache); + kmem_cache_destroy(btracker_work_cache); } module_init(dm_cache_init); |
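
The DMEMIT() calls in the STATUSTYPE_INFO branch above fix the order of the leading status fields: metadata block size, used/total metadata blocks, cache block size, resident/total cache blocks, six read/write hit/miss and demotion/promotion counters, and the dirty-block count. Purely as an illustration of that layout, here is a minimal userspace sketch that pulls those fixed fields out of such a line with sscanf(); the sample string and all variable names are invented and are not part of the driver.

#include <stdio.h>

int main(void)
{
	/* Fabricated example of the target-emitted status fields. */
	const char *status = "8 123/4096 512 64/2048 1000 50 800 100 5 7 3";
	unsigned int meta_bs, read_hit, read_miss, write_hit, write_miss;
	unsigned int demotions, promotions;
	unsigned long long meta_used, meta_total, block_size, resident, csize;
	unsigned long dirty;

	if (sscanf(status, "%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu",
		   &meta_bs, &meta_used, &meta_total, &block_size,
		   &resident, &csize, &read_hit, &read_miss, &write_hit,
		   &write_miss, &demotions, &promotions, &dirty) != 13)
		return 1;

	printf("residency %llu/%llu blocks, %lu dirty\n",
	       resident, csize, dirty);
	return 0;
}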
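
parse_cblock_range() and validate_cblock_range() above accept either a single cblock ('3456') or a half-open 'begin-end' pair ('123-234'), and process_invalidate_cblocks_message() rejects the message with -EPERM unless the cache is in passthrough mode. In practice the ranges would be sent with something like 'dmsetup message <cache-dev> 0 invalidate_cblocks 3456 123-234'. The standalone sketch below mirrors only the parsing and bounds checks; the helper name check_range() and the cache size are hypothetical.

#include <stdio.h>

/* Mirrors validate_cblock_range(): valid iff begin < end <= cache_size. */
static int check_range(unsigned long long b, unsigned long long e,
		       unsigned long long cache_size)
{
	return b < e && e <= cache_size;
}

int main(void)
{
	const char *examples[] = { "3456", "123-234", "10-5", "bogus" };
	unsigned long long cache_size = 4096;	/* invented number of cblocks */
	unsigned int i;

	for (i = 0; i < sizeof(examples) / sizeof(examples[0]); i++) {
		unsigned long long b, e;
		char dummy;

		if (sscanf(examples[i], "%llu-%llu%c", &b, &e, &dummy) == 2) {
			/* form (ii): "begin-end", end is one past the end */
		} else if (sscanf(examples[i], "%llu%c", &b, &dummy) == 1) {
			e = b + 1;		/* form (i): a single cblock */
		} else {
			printf("%s: invalid range\n", examples[i]);
			continue;
		}

		printf("%s -> [%llu, %llu) %s\n", examples[i], b, e,
		       check_range(b, e, cache_size) ? "ok" : "out of range");
	}
	return 0;
}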
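
When discard passdown is off (either by table line or because disable_passdown_if_not_supported() turned it off), set_discard_limits() above advertises the cache's own virtual limits: at most 1024 discard blocks per discard, capped by the origin size, with a granularity of one discard block in bytes. The arithmetic is repeated below as a rough userspace sketch; the discard block size and origin size are invented values.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint64_t discard_block_size = 128;	/* sectors, hypothetical */
	uint64_t origin_sectors = 2097152;	/* 1 GiB origin, hypothetical */
	uint64_t max_discard_sectors;
	uint64_t discard_granularity_bytes;

	/* At most 1024 discard blocks, but never more than the origin. */
	max_discard_sectors = discard_block_size * 1024;
	if (max_discard_sectors > origin_sectors)
		max_discard_sectors = origin_sectors;

	/* Granularity is one discard block, expressed in bytes. */
	discard_granularity_bytes = discard_block_size << SECTOR_SHIFT;

	printf("max_hw_discard_sectors = %llu\n",
	       (unsigned long long)max_discard_sectors);
	printf("discard_granularity   = %llu bytes\n",
	       (unsigned long long)discard_granularity_bytes);
	return 0;
}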
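
cache_io_hints() above keeps the stacked io_opt only when it is at least one cache block and an exact multiple of the cache block size; otherwise both io_min and io_opt are forced to the block size. The decision is sketched below with plain modulo arithmetic in place of the kernel's do_div(); the sample limits are made up.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

static uint32_t pick_io_opt(uint32_t stacked_io_opt_bytes,
			    uint64_t sectors_per_block)
{
	uint64_t io_opt_sectors = stacked_io_opt_bytes >> SECTOR_SHIFT;

	/*
	 * Keep the stacked limit only if it is at least one cache block
	 * and a whole multiple of the cache block size.
	 */
	if (io_opt_sectors < sectors_per_block ||
	    io_opt_sectors % sectors_per_block)
		return (uint32_t)(sectors_per_block << SECTOR_SHIFT);

	return stacked_io_opt_bytes;
}

int main(void)
{
	/* 512 KiB io_opt vs. a 512-sector (256 KiB) block: kept. */
	printf("%u\n", pick_io_opt(512 * 1024, 512));
	/* 192 KiB io_opt is not a multiple: overridden to 262144. */
	printf("%u\n", pick_io_opt(192 * 1024, 512));
	return 0;
}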