path: root/drivers/md/dm-zoned-target.c
author    Hannes Reinecke <hare@suse.de>    2020-05-19 10:14:20 +0200
committer Mike Snitzer <snitzer@redhat.com>    2020-05-20 17:09:49 -0400
commit    34f5affd04c4a16d9df19c369bcec6e873e57ffe (patch)
tree      73da1a362f1bfd5a3f4d3bee186e81e536a46a2b /drivers/md/dm-zoned-target.c
parent    489dc0f06a5837f87482c0ce61d830d24e17082e (diff)
dm zoned: separate random and cache zones
Instead of lumping emulated zones together with random zones, handle them as separate 'cache' zones. This improves code readability and allows an easier implementation of different cache policies.

Also add allocation flags to separate the zone type (cache, random, or sequential) from the purpose of the allocation (e.g. reclaim), and switch the allocation policy to not use random zones as buffer zones when cache zones are present. This avoids a performance drop once all cache zones are used.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
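To make the type/purpose split concrete, the sketch below shows how a buffer zone allocation could pass a zone type flag (cache, random, or sequential) separately from a purpose flag such as reclaim, preferring cache zones whenever any are configured. The flag values, the dmz_alloc_zone() signature, and the alloc_buffer_zone() helper are illustrative assumptions written in dm-zoned style, not the exact code added by this patch; of the names used here, only dmz_nr_cache_zones() appears in the diff below.

/* Illustrative flag values only: zone type vs. allocation purpose. */
#define DMZ_ALLOC_RND		0x01	/* want a conventional (random) zone */
#define DMZ_ALLOC_CACHE		0x02	/* want an emulated cache zone */
#define DMZ_ALLOC_SEQ		0x04	/* want a sequential write zone */
#define DMZ_ALLOC_RECLAIM	0x10	/* request made on behalf of reclaim */

/*
 * Hypothetical helper: allocate a buffer zone, preferring cache zones and
 * falling back to random zones only when no cache zones are configured,
 * so random zones are no longer consumed as buffers once caching exists.
 */
static struct dm_zone *alloc_buffer_zone(struct dmz_metadata *zmd)
{
	unsigned int alloc_flags;

	if (dmz_nr_cache_zones(zmd))
		alloc_flags = DMZ_ALLOC_CACHE;
	else
		alloc_flags = DMZ_ALLOC_RND;

	return dmz_alloc_zone(zmd, alloc_flags);
}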
Diffstat (limited to 'drivers/md/dm-zoned-target.c')
-rw-r--r--	drivers/md/dm-zoned-target.c	19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index b586fc67d931..2770e293a97b 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -190,7 +190,8 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
dmz_metadata_label(zmd),
(unsigned long long)dmz_bio_chunk(zmd, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
zone->id,
(unsigned long long)chunk_block, nr_blocks);
@@ -198,7 +199,8 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
bzone = zone->bzone;
while (chunk_block < end_block) {
nr_blocks = 0;
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block) {
/* Test block validity in the data zone */
ret = dmz_block_valid(zmd, zone, chunk_block);
if (ret < 0)
@@ -331,11 +333,13 @@ static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
dmz_metadata_label(zmd),
(unsigned long long)dmz_bio_chunk(zmd, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
zone->id,
(unsigned long long)chunk_block, nr_blocks);
- if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block == zone->wp_block) {
/*
* zone is a random zone or it is a sequential zone
* and the BIO is aligned to the zone write pointer:
@@ -381,7 +385,8 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
* Invalidate blocks in the data zone and its
* buffer zone if one is mapped.
*/
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block)
ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
if (ret == 0 && zone->bzone)
ret = dmz_invalidate_blocks(zmd, zone->bzone,
@@ -1065,8 +1070,10 @@ static void dmz_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- DMEMIT("%u zones %u/%u random %u/%u sequential",
+ DMEMIT("%u zones %u/%u cache %u/%u random %u/%u sequential",
dmz_nr_zones(dmz->metadata),
+ dmz_nr_unmap_cache_zones(dmz->metadata),
+ dmz_nr_cache_zones(dmz->metadata),
dmz_nr_unmap_rnd_zones(dmz->metadata),
dmz_nr_rnd_zones(dmz->metadata),
dmz_nr_unmap_seq_zones(dmz->metadata),
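
With the added counters, the STATUSTYPE_INFO line reports unmapped/total cache zones ahead of the random and sequential counts. A hypothetical 'dmsetup status' line (start, length and zone counts invented purely for illustration) would read:

0 20971520 zoned 5120 zones 120/128 cache 0/0 random 4800/4992 sequential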