Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r-- drivers/md/dm-thin.c | 114
1 file changed, 53 insertions, 61 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2b13c949bd72..c84149ba4e38 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -401,8 +401,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
- &op->bio);
+ return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -454,12 +453,13 @@ static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bi
cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
- if (r)
+ if (r) {
/*
* We reused an old cell; we can get rid of
* the new one.
*/
dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+ }
return r;
}
@@ -592,12 +592,6 @@ struct dm_thin_endio_hook {
struct dm_bio_prison_cell *cell;
};
-static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
-{
- bio_list_merge(bios, master);
- bio_list_init(master);
-}
-
static void error_bio_list(struct bio_list *bios, blk_status_t error)
{
struct bio *bio;
@@ -616,7 +610,7 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, master);
+ bio_list_merge_init(&bios, master);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, error);
@@ -645,8 +639,8 @@ static void requeue_io(struct thin_c *tc)
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, &tc->deferred_bio_list);
- __merge_bio_list(&bios, &tc->retry_on_resume_list);
+ bio_list_merge_init(&bios, &tc->deferred_bio_list);
+ bio_list_merge_init(&bios, &tc->retry_on_resume_list);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, BLK_STS_DM_REQUEUE);
@@ -708,9 +702,10 @@ static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
(void) sector_div(e, pool->sectors_per_block);
}
- if (e < b)
+ if (e < b) {
/* Can happen if the bio is within a single block. */
e = b;
+ }
*begin = b;
*end = e;
@@ -722,13 +717,14 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
sector_t bi_sector = bio->bi_iter.bi_sector;
bio_set_dev(bio, tc->pool_dev->bdev);
- if (block_size_is_power_of_two(pool))
+ if (block_size_is_power_of_two(pool)) {
bio->bi_iter.bi_sector =
(block << pool->sectors_per_block_shift) |
(bi_sector & (pool->sectors_per_block - 1));
- else
+ } else {
bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
+ }
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -1402,9 +1398,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
if (pool->pf.zero_new_blocks) {
if (io_overwrites_block(pool, bio))
remap_and_issue_overwrite(tc, bio, data_block, m);
- else
+ else {
ll_zero(tc, m, data_block * pool->sectors_per_block,
(data_block + 1) * pool->sectors_per_block);
+ }
} else
process_prepared_mapping(m);
}
@@ -1417,17 +1414,17 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
sector_t virt_block_begin = virt_block * pool->sectors_per_block;
sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
- if (virt_block_end <= tc->origin_size)
+ if (virt_block_end <= tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
pool->sectors_per_block);
- else if (virt_block_begin < tc->origin_size)
+ } else if (virt_block_begin < tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
tc->origin_size - virt_block_begin);
- else
+ } else
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
@@ -2335,10 +2332,9 @@ static struct thin_c *get_first_thin(struct pool *pool)
struct thin_c *tc = NULL;
rcu_read_lock();
- if (!list_empty(&pool->active_thins)) {
- tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+ tc = list_first_or_null_rcu(&pool->active_thins, struct thin_c, list);
+ if (tc)
thin_get(tc);
- }
rcu_read_unlock();
return tc;
@@ -2487,6 +2483,7 @@ static void pool_work_wait(struct pool_work *pw, struct pool *pool,
init_completion(&pw->complete);
queue_work(pool->wq, &pw->worker);
wait_for_completion(&pw->complete);
+ destroy_work_on_stack(&pw->worker);
}
/*----------------------------------------------------------------*/
@@ -2528,16 +2525,11 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
/*----------------------------------------------------------------*/
-static bool passdown_enabled(struct pool_c *pt)
-{
- return pt->adjusted_pf.discard_passdown;
-}
-
static void set_discard_callbacks(struct pool *pool)
{
struct pool_c *pt = pool->ti->private;
- if (passdown_enabled(pt)) {
+ if (pt->adjusted_pf.discard_passdown) {
pool->process_discard_cell = process_discard_cell_passdown;
pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
@@ -2846,11 +2838,11 @@ static bool is_factor(sector_t block_size, uint32_t n)
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not.
*/
-static void disable_passdown_if_not_supported(struct pool_c *pt)
+static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
{
struct pool *pool = pt->pool;
struct block_device *data_bdev = pt->data_dev->bdev;
- struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+ struct queue_limits *data_limits = bdev_limits(data_bdev);
const char *reason = NULL;
if (!pt->adjusted_pf.discard_passdown)
@@ -2956,7 +2948,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
if (IS_ERR(pmd)) {
*error = "Error creating metadata object";
- return (struct pool *)pmd;
+ return ERR_CAST(pmd);
}
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -3039,8 +3031,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
}
pool->cell_sort_array =
- vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
- sizeof(*pool->cell_sort_array)));
+ vmalloc_array(CELL_SORT_ARRAY_SIZE,
+ sizeof(*pool->cell_sort_array));
if (!pool->cell_sort_array) {
*error = "Error allocating cell sort array";
err_p = ERR_PTR(-ENOMEM);
@@ -3301,7 +3293,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
unsigned long block_size;
dm_block_t low_water_blocks;
struct dm_dev *metadata_dev;
- fmode_t metadata_mode;
+ blk_mode_t metadata_mode;
/*
* FIXME Remove validation from scope of lock.
@@ -3334,7 +3326,8 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto out_unlock;
- metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+ metadata_mode = BLK_OPEN_READ |
+ ((pf.mode == PM_READ_ONLY) ? 0 : BLK_OPEN_WRITE);
r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
if (r) {
ti->error = "Error opening metadata block device";
@@ -3342,7 +3335,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
warn_if_metadata_device_too_big(metadata_dev->bdev);
- r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
+ r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE, &data_dev);
if (r) {
ti->error = "Error getting data device";
goto out_metadata;
@@ -3446,7 +3439,6 @@ out_unlock:
static int pool_map(struct dm_target *ti, struct bio *bio)
{
- int r;
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
@@ -3455,10 +3447,9 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
*/
spin_lock_irq(&pool->lock);
bio_set_dev(bio, pt->data_dev->bdev);
- r = DM_MAPIO_REMAPPED;
spin_unlock_irq(&pool->lock);
- return r;
+ return DM_MAPIO_REMAPPED;
}
static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
@@ -4088,10 +4079,10 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (io_opt_sectors < pool->sectors_per_block ||
!is_factor(io_opt_sectors, pool->sectors_per_block)) {
if (is_factor(pool->sectors_per_block, limits->max_sectors))
- blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
+ limits->io_min = limits->max_sectors << SECTOR_SHIFT;
else
- blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
- blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ limits->io_min = pool->sectors_per_block << SECTOR_SHIFT;
+ limits->io_opt = pool->sectors_per_block << SECTOR_SHIFT;
}
/*
@@ -4099,28 +4090,29 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
* They get transferred to the live pool in bind_control_target()
* called from pool_preresume().
*/
- if (!pt->adjusted_pf.discard_enabled) {
+
+ if (pt->adjusted_pf.discard_enabled) {
+ disable_discard_passdown_if_not_supported(pt);
+ if (!pt->adjusted_pf.discard_passdown)
+ limits->max_hw_discard_sectors = 0;
+ /*
+ * The pool uses the same discard limits as the underlying data
+ * device. DM core has already set this up.
+ */
+ } else {
/*
* Must explicitly disallow stacking discard limits otherwise the
* block layer will stack them if pool's data device has support.
*/
limits->discard_granularity = 0;
- return;
}
-
- disable_passdown_if_not_supported(pt);
-
- /*
- * The pool uses the same discard limits as the underlying data
- * device. DM core has already set this up.
- */
}
static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
- DM_TARGET_IMMUTABLE,
- .version = {1, 23, 0},
+ DM_TARGET_IMMUTABLE | DM_TARGET_PASSES_CRYPTO,
+ .version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4223,7 +4215,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_origin_dev;
}
- r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
+ r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &origin_dev);
if (r) {
ti->error = "Error opening origin device";
goto bad_origin_dev;
@@ -4497,16 +4489,16 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
struct thin_c *tc = ti->private;
struct pool *pool = tc->pool;
- if (!pool->pf.discard_enabled)
- return;
-
- limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+ if (pool->pf.discard_enabled) {
+ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+ limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+ }
}
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 23, 0},
+ .features = DM_TARGET_PASSES_CRYPTO,
+ .version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
@@ -4567,5 +4559,5 @@ module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");