author    Joe Thornber <ejt@redhat.com>  2014-11-06 14:38:01 +0000
committer Mike Snitzer <snitzer@redhat.com>  2014-11-10 15:25:30 -0500
commit    08b184514f65d160ce66381dafca5962e3d8f785
tree      f33736579d5c7401524f4deb9af1d3c31cd48ad3  /drivers/md/dm-cache-target.c
parent    1bad9bc4ee899a108499e5eac6baafff018b4d0b
dm cache: revert "prevent corruption caused by discard_block_size > cache_block_size"
This reverts commit d132cc6d9e92424bb9d4fd35f5bd0e55d583f4be because we
actually do want to allow the discard blocksize to be larger than the
cache blocksize.  Further dm-cache discard changes will make this
possible.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-cache-target.c')
 drivers/md/dm-cache-target.c | 37
 1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ced7fd4adddb..c2ca74374944 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -238,7 +238,7 @@ struct cache {
 	 */
 	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
-	uint32_t discard_block_size;
+	uint32_t discard_block_size; /* a power of 2 times sectors per block */
 
 	/*
 	 * Rather than reconstructing the table line for the status we just
@@ -2197,6 +2197,35 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
 	return 0;
 }
 
+/*
+ * We want the discard block size to be a power of two, at least the size
+ * of the cache block size, and have no more than 2^14 discard blocks
+ * across the origin.
+ */
+#define MAX_DISCARD_BLOCKS (1 << 14)
+
+static bool too_many_discard_blocks(sector_t discard_block_size,
+				    sector_t origin_size)
+{
+	(void) sector_div(origin_size, discard_block_size);
+
+	return origin_size > MAX_DISCARD_BLOCKS;
+}
+
+static sector_t calculate_discard_block_size(sector_t cache_block_size,
+					     sector_t origin_size)
+{
+	sector_t discard_block_size;
+
+	discard_block_size = roundup_pow_of_two(cache_block_size);
+
+	if (origin_size)
+		while (too_many_discard_blocks(discard_block_size, origin_size))
+			discard_block_size *= 2;
+
+	return discard_block_size;
+}
+
 #define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
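
For reference, here is a minimal userspace sketch of the sizing logic the two helpers above restore. It is not the kernel code itself: sector_t, sector_div() and roundup_pow_of_two() are replaced with plain C stand-ins, and the origin/cache-block sizes in main() are hypothetical.

/*
 * Userspace sketch of calculate_discard_block_size(): start at the cache
 * block size rounded up to a power of two, then keep doubling until the
 * origin is covered by no more than 2^14 discard blocks.
 * Sizes are in 512-byte sectors, as in the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DISCARD_BLOCKS (1 << 14)

static uint64_t roundup_pow_of_two_u64(uint64_t n)
{
	uint64_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static bool too_many_discard_blocks(uint64_t discard_block_size,
				    uint64_t origin_size)
{
	/* the kernel uses sector_div(); plain 64-bit division here */
	return origin_size / discard_block_size > MAX_DISCARD_BLOCKS;
}

static uint64_t calculate_discard_block_size(uint64_t cache_block_size,
					     uint64_t origin_size)
{
	uint64_t discard_block_size = roundup_pow_of_two_u64(cache_block_size);

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}

int main(void)
{
	/* hypothetical example: 1 TiB origin (2^31 sectors), 256 KiB cache blocks (512 sectors) */
	uint64_t origin_sectors = 1ULL << 31;
	uint64_t cache_block_sectors = 512;
	uint64_t dbs = calculate_discard_block_size(cache_block_sectors, origin_sectors);

	/* prints 131072 sectors, i.e. 64 MiB discard blocks: 2^31 / 2^17 == 2^14 blocks */
	printf("discard_block_size = %llu sectors\n", (unsigned long long)dbs);
	return 0;
}

With those numbers the discard bitset stays at 2^14 bits while still covering a 1 TiB origin, which is the trade-off the MAX_DISCARD_BLOCKS cap encodes; a per-cache-block bitset would need 2^22 bits for the same origin.
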
@@ -2320,7 +2349,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
 
-	cache->discard_block_size = cache->sectors_per_block;
+	cache->discard_block_size =
+		calculate_discard_block_size(cache->sectors_per_block,
+					     cache->origin_sectors);
 	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
 	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
 	if (!cache->discard_bitset) {
@@ -3099,7 +3130,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 	/*
 	 * FIXME: these limits may be incompatible with the cache device
 	 */
-	limits->max_discard_sectors = cache->discard_block_size;
+	limits->max_discard_sectors = cache->discard_block_size * 1024;
 	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
 }
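
As a rough sanity check of the queue-limits change above, using the hypothetical 131072-sector discard block size from the earlier sketch (SECTOR_SHIFT assumed to be 9, i.e. 512-byte sectors):

/*
 * Illustration only: computes the limits the hunk above would produce
 * for a 131072-sector (64 MiB) discard block.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint64_t discard_block_size = 131072;                              /* sectors */
	uint64_t max_discard_sectors = discard_block_size * 1024;          /* sectors, as set above */
	uint64_t discard_granularity = discard_block_size << SECTOR_SHIFT; /* bytes */

	/* prints 134217728 sectors (64 GiB) and 67108864 bytes (64 MiB) */
	printf("max_discard_sectors = %llu, discard_granularity = %llu bytes\n",
	       (unsigned long long)max_discard_sectors,
	       (unsigned long long)discard_granularity);
	return 0;
}

In other words, the revert lets a single discard span up to 1024 discard blocks instead of one, while discard_granularity continues to advertise the discard block size in bytes.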