Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c | 739
1 file changed, 521 insertions(+), 218 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f0f118ab20fa..6c83ab940af7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -11,6 +11,7 @@
#include "dm-uevent.h"
#include "dm-ima.h"
+#include <linux/bio-integrity.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -402,15 +403,16 @@ static void do_deferred_remove(struct work_struct *w)
dm_deferred_remove();
}
-static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
+ struct mapped_device *md = disk->private_data;
return dm_get_geometry(md, geo);
}
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
- struct block_device **bdev)
+ struct block_device **bdev, unsigned int cmd,
+ unsigned long arg, bool *forward)
{
struct dm_target *ti;
struct dm_table *map;
@@ -433,8 +435,8 @@ retry:
if (dm_suspended_md(md))
return -EAGAIN;
- r = ti->type->prepare_ioctl(ti, bdev);
- if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+ r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward);
+ if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
fsleep(10000);
goto retry;
@@ -453,9 +455,10 @@ static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
{
struct mapped_device *md = bdev->bd_disk->private_data;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
- if (r < 0)
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward);
+ if (!forward || r < 0)
goto out;
if (r > 0) {
@@ -487,18 +490,13 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-static inline bool bio_is_flush_with_data(struct bio *bio)
-{
- return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
-}
-
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{
/*
* If REQ_PREFLUSH set, don't account payload, it will be
* submitted (and accounted) after this flush completes.
*/
- if (bio_is_flush_with_data(bio))
+ if (io->requeue_flush_with_data)
return 0;
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
return io->sectors;
@@ -570,13 +568,15 @@ static void dm_end_io_acct(struct dm_io *io)
dm_io_acct(io, true);
}
-static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
+static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
{
struct dm_io *io;
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
+ clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
+ if (unlikely(!clone))
+ return NULL;
tio = clone_to_tio(clone);
tio->flags = 0;
dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
@@ -585,6 +585,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
io = container_of(tio, struct dm_io, tio);
io->magic = DM_IO_MAGIC;
io->status = BLK_STS_OK;
+ io->requeue_flush_with_data = false;
/* one ref is for submission, the other is for completion */
atomic_set(&io->io_count, 2);
@@ -643,7 +644,7 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
/* Set default bdev, but target must bio_set_dev() before issuing IO */
clone->bi_bdev = md->disk->part0;
- if (unlikely(ti->needs_bio_set_dev))
+ if (likely(ti != NULL) && unlikely(ti->needs_bio_set_dev))
bio_set_dev(clone, md->disk->part0);
if (len) {
@@ -715,24 +716,6 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
rcu_read_unlock();
}
-static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
- int *srcu_idx, blk_opf_t bio_opf)
-{
- if (bio_opf & REQ_NOWAIT)
- return dm_get_live_table_fast(md);
- else
- return dm_get_live_table(md, srcu_idx);
-}
-
-static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
- blk_opf_t bio_opf)
-{
- if (bio_opf & REQ_NOWAIT)
- dm_put_live_table_fast(md);
- else
- dm_put_live_table(md, srcu_idx);
-}
-
static char *_dm_claim_ptr = "I belong to device-mapper";
/*
@@ -742,6 +725,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{
struct table_device *td;
+ struct file *bdev_file;
struct block_device *bdev;
u64 part_off;
int r;
@@ -751,12 +735,14 @@ static struct table_device *open_table_device(struct mapped_device *md,
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
- bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
- if (IS_ERR(bdev)) {
- r = PTR_ERR(bdev);
+ bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
+ if (IS_ERR(bdev_file)) {
+ r = PTR_ERR(bdev_file);
goto out_free_td;
}
+ bdev = file_bdev(bdev_file);
+
/*
* We can be called before the dm disk is added. In that case we can't
* register the holder relation here. It will be done once add_disk was
@@ -770,13 +756,15 @@ static struct table_device *open_table_device(struct mapped_device *md,
td->dm_dev.mode = mode;
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
+ td->dm_dev.bdev_file = bdev_file;
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
+ NULL, NULL);
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
- blkdev_put(bdev, _dm_claim_ptr);
+ __fput_sync(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -789,7 +777,13 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- blkdev_put(td->dm_dev.bdev, _dm_claim_ptr);
+
+ /* Leverage async fput() if DMF_DEFERRED_REMOVE set */
+ if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ fput(td->dm_dev.bdev_file);
+ else
+ __fput_sync(td->dm_dev.bdev_file);
+
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
@@ -950,6 +944,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
struct mapped_device *md = io->md;
blk_status_t io_error;
bool requeued;
+ bool requeue_flush_with_data;
requeued = dm_handle_requeue(io, first_stage);
if (requeued && first_stage)
@@ -966,6 +961,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
__dm_start_io_acct(io);
dm_end_io_acct(io);
}
+ requeue_flush_with_data = io->requeue_flush_with_data;
free_io(io);
smp_wmb();
this_cpu_dec(*md->pending_io);
@@ -978,7 +974,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
if (requeued)
return;
- if (bio_is_flush_with_data(bio)) {
+ if (unlikely(requeue_flush_with_data)) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
@@ -1026,10 +1022,8 @@ static void dm_wq_requeue_work(struct work_struct *work)
*
* 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
*/
-static void dm_io_complete(struct dm_io *io)
+static inline void dm_io_complete(struct dm_io *io)
{
- bool first_requeue;
-
/*
* Only dm_io that has been split needs two stage requeue, otherwise
* we may run into long bio clone chain during suspend and OOM could
@@ -1038,12 +1032,7 @@ static void dm_io_complete(struct dm_io *io)
* Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
* also aren't handled via the first stage requeue.
*/
- if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
- first_requeue = true;
- else
- first_requeue = false;
-
- __dm_io_complete(io, first_requeue);
+ __dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
}
/*
@@ -1086,22 +1075,6 @@ static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
return &md->queue->limits;
}
-void disable_discard(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support DISCARD, disable it */
- limits->max_discard_sectors = 0;
-}
-
-void disable_write_zeroes(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support WRITE ZEROES, disable it */
- limits->max_write_zeroes_sectors = 0;
-}
-
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
@@ -1112,17 +1085,17 @@ static void clone_endio(struct bio *bio)
blk_status_t error = bio->bi_status;
struct dm_target_io *tio = clone_to_tio(bio);
struct dm_target *ti = tio->ti;
- dm_endio_fn endio = ti->type->end_io;
+ dm_endio_fn endio = likely(ti != NULL) ? ti->type->end_io : NULL;
struct dm_io *io = tio->io;
struct mapped_device *md = io->md;
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(bio->bi_bdev))
- disable_discard(md);
+ blk_queue_disable_discard(md->queue);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bdev_write_zeroes_sectors(bio->bi_bdev))
- disable_write_zeroes(md);
+ blk_queue_disable_write_zeroes(md->queue);
}
if (static_branch_unlikely(&zoned_enabled) &&
@@ -1159,7 +1132,7 @@ static void clone_endio(struct bio *bio)
}
if (static_branch_unlikely(&swap_bios_enabled) &&
- unlikely(swap_bios_limit(ti, bio)))
+ likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio)))
up(&md->swap_bios_semaphore);
free_tio(bio);
@@ -1193,7 +1166,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
return len;
return min_t(sector_t, len,
min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
- blk_chunk_sectors_left(target_offset, max_granularity)));
+ blk_boundary_sectors_left(target_offset, max_granularity)));
}
static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
@@ -1236,7 +1209,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
- pfn_t *pfn)
+ unsigned long *pfn)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
@@ -1311,8 +1284,9 @@ out:
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
- * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
- * __send_duplicate_bios().
+ * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
+ * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
+ * by __send_duplicate_bios().
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1345,11 +1319,19 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
unsigned int bio_sectors = bio_sectors(bio);
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
- BUG_ON(op_is_zone_mgmt(bio_op(bio)));
- BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
BUG_ON(bio_sectors > *tio->len_ptr);
BUG_ON(n_sectors > bio_sectors);
+ if (static_branch_unlikely(&zoned_enabled) &&
+ unlikely(bdev_is_zoned(bio->bi_bdev))) {
+ enum req_op op = bio_op(bio);
+
+ BUG_ON(op_is_zone_mgmt(op));
+ BUG_ON(op == REQ_OP_WRITE);
+ BUG_ON(op == REQ_OP_WRITE_ZEROES);
+ BUG_ON(op == REQ_OP_ZONE_APPEND);
+ }
+
*tio->len_ptr -= bio_sectors - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
@@ -1433,17 +1415,11 @@ static void __map_bio(struct bio *clone)
down(&md->swap_bios_semaphore);
}
- if (static_branch_unlikely(&zoned_enabled)) {
- /*
- * Check if the IO needs a special mapping due to zone append
- * emulation on zoned target. In this case, dm_zone_map_bio()
- * calls the target map operation.
- */
- if (unlikely(dm_emulate_zone_append(md)))
- r = dm_zone_map_bio(tio);
- else
- r = ti->type->map(ti, clone);
- } else
+ if (likely(ti->type->map == linear_map))
+ r = linear_map(ti, clone);
+ else if (ti->type->map == stripe_map)
+ r = stripe_map(ti, clone);
+ else
r = ti->type->map(ti, clone);
switch (r) {
@@ -1497,7 +1473,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
for (try = 0; try < 2; try++) {
int bio_nr;
- if (try)
+ if (try && num_bios > 1)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
bio = alloc_tio(ci, ti, bio_nr, len,
@@ -1507,7 +1483,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
bio_list_add(blist, bio);
}
- if (try)
+ if (try && num_bios > 1)
mutex_unlock(&ci->io->md->table_devices_lock);
if (bio_nr == num_bios)
return;
@@ -1517,34 +1493,30 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
}
-static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios, unsigned int *len)
+static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ unsigned int num_bios, unsigned int *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
unsigned int ret = 0;
- switch (num_bios) {
- case 0:
- break;
- case 1:
- if (len)
- setup_split_accounting(ci, *len);
- clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
- __map_bio(clone);
- ret = 1;
- break;
- default:
- if (len)
- setup_split_accounting(ci, *len);
- /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
- alloc_multiple_bios(&blist, ci, ti, num_bios, len);
- while ((clone = bio_list_pop(&blist))) {
+ if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
+ return 0;
+
+ /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+ if (len)
+ setup_split_accounting(ci, *len);
+
+ /*
+ * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
+ * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
+ */
+ alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ while ((clone = bio_list_pop(&blist))) {
+ if (num_bios > 1)
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
- __map_bio(clone);
- ret += 1;
- }
- break;
+ __map_bio(clone);
+ ret += 1;
}
return ret;
@@ -1554,26 +1526,60 @@ static void __send_empty_flush(struct clone_info *ci)
{
struct dm_table *t = ci->map;
struct bio flush_bio;
+ blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+
+ if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
+ (REQ_IDLE | REQ_SYNC))
+ opf |= REQ_IDLE;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
- REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
ci->bio = &flush_bio;
ci->sector_count = 0;
ci->io->tio.clone.bi_iter.bi_size = 0;
- for (unsigned int i = 0; i < t->num_targets; i++) {
- unsigned int bios;
- struct dm_target *ti = dm_table_get_target(t, i);
+ if (!t->flush_bypasses_map) {
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ unsigned int bios;
+ struct dm_target *ti = dm_table_get_target(t, i);
- atomic_add(ti->num_flush_bios, &ci->io->io_count);
- bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
- atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
+ if (unlikely(ti->num_flush_bios == 0))
+ continue;
+
+ atomic_add(ti->num_flush_bios, &ci->io->io_count);
+ bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
+ NULL);
+ atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
+ }
+ } else {
+ /*
+ * Note that there's no need to grab t->devices_lock here
+ * because the targets that support flush optimization don't
+ * modify the list of devices.
+ */
+ struct list_head *devices = dm_table_get_devices(t);
+ unsigned int len = 0;
+ struct dm_dev_internal *dd;
+ list_for_each_entry(dd, devices, list) {
+ struct bio *clone;
+ /*
+ * Note that the structure dm_target_io is not
+ * associated with any target (because the device may be
+ * used by multiple targets), so we set tio->ti = NULL.
+ * We must check for NULL in the I/O processing path, to
+ * avoid NULL pointer dereference.
+ */
+ clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO);
+ atomic_add(1, &ci->io->io_count);
+ bio_set_dev(clone, dd->dm_dev->bdev);
+ clone->bi_end_io = clone_endio;
+ dm_submit_bio_remap(clone, NULL);
+ }
}
/*
@@ -1585,10 +1591,9 @@ static void __send_empty_flush(struct clone_info *ci)
bio_uninit(ci->bio);
}
-static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios,
- unsigned int max_granularity,
- unsigned int max_sectors)
+static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
+ unsigned int num_bios, unsigned int max_granularity,
+ unsigned int max_sectors)
{
unsigned int len, bios;
@@ -1609,20 +1614,19 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
static bool is_abnormal_io(struct bio *bio)
{
- enum req_op op = bio_op(bio);
-
- if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
- switch (op) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- case REQ_OP_WRITE_ZEROES:
- return true;
- default:
- break;
- }
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ case REQ_OP_FLUSH:
+ return false;
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_ZONE_RESET_ALL:
+ return true;
+ default:
+ return false;
}
-
- return false;
}
static blk_status_t __process_abnormal_io(struct clone_info *ci,
@@ -1643,14 +1647,10 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
case REQ_OP_SECURE_ERASE:
num_bios = ti->num_secure_erase_bios;
max_sectors = limits->max_secure_erase_sectors;
- if (ti->max_secure_erase_granularity)
- max_granularity = max_sectors;
break;
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
max_sectors = limits->max_write_zeroes_sectors;
- if (ti->max_write_zeroes_granularity)
- max_granularity = max_sectors;
break;
default:
break;
@@ -1665,8 +1665,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
if (unlikely(!num_bios))
return BLK_STS_NOTSUPP;
- __send_changing_extent_only(ci, ti, num_bios,
- max_granularity, max_sectors);
+ __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
+
return BLK_STS_OK;
}
@@ -1725,10 +1725,6 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
if (unlikely(!ti))
return BLK_STS_IOERR;
- if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
- unlikely(!dm_target_supports_nowait(ti->type)))
- return BLK_STS_NOTSUPP;
-
if (unlikely(ci->is_abnormal_io))
return __process_abnormal_io(ci, ti);
@@ -1739,8 +1735,21 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
+ if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count)
+ return BLK_STS_IOERR;
+
setup_split_accounting(ci, len);
- clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+
+ if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
+ if (unlikely(!dm_target_supports_nowait(ti->type)))
+ return BLK_STS_NOTSUPP;
+
+ clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
+ if (unlikely(!clone))
+ return BLK_STS_AGAIN;
+ } else {
+ clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+ }
__map_bio(clone);
ci->sector += len;
@@ -1749,11 +1758,11 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
return BLK_STS_OK;
}
-static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
+static void init_clone_info(struct clone_info *ci, struct dm_io *io,
struct dm_table *map, struct bio *bio, bool is_abnormal)
{
ci->map = map;
- ci->io = alloc_io(md, bio);
+ ci->io = io;
ci->bio = bio;
ci->is_abnormal_io = is_abnormal;
ci->submit_as_polled = false;
@@ -1766,6 +1775,165 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
ci->sector_count = 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
+{
+ /*
+ * Special case the zone operations that cannot or should not be split.
+ */
+ switch (bio_op(bio)) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return false;
+ default:
+ break;
+ }
+
+ /*
+ * When mapped devices use the block layer zone write plugging, we must
+ * split any large BIO to the mapped device limits to not submit BIOs
+ * that span zone boundaries and to avoid potential deadlocks with
+ * queue freeze operations.
+ */
+ return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
+}
+
+static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+{
+ if (!bio_needs_zone_write_plugging(bio))
+ return false;
+ return blk_zone_plug_bio(bio, 0);
+}
+
+static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
+ struct dm_target *ti)
+{
+ struct bio_list blist = BIO_EMPTY_LIST;
+ struct mapped_device *md = ci->io->md;
+ unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors;
+ unsigned long *need_reset;
+ unsigned int i, nr_zones, nr_reset;
+ unsigned int num_bios = 0;
+ blk_status_t sts = BLK_STS_OK;
+ sector_t sector = ti->begin;
+ struct bio *clone;
+ int ret;
+
+ nr_zones = ti->len >> ilog2(zone_sectors);
+ need_reset = bitmap_zalloc(nr_zones, GFP_NOIO);
+ if (!need_reset)
+ return BLK_STS_RESOURCE;
+
+ ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin,
+ nr_zones, need_reset);
+ if (ret) {
+ sts = BLK_STS_IOERR;
+ goto free_bitmap;
+ }
+
+ /* If we have no zone to reset, we are done. */
+ nr_reset = bitmap_weight(need_reset, nr_zones);
+ if (!nr_reset)
+ goto free_bitmap;
+
+ atomic_add(nr_zones, &ci->io->io_count);
+
+ for (i = 0; i < nr_zones; i++) {
+
+ if (!test_bit(i, need_reset)) {
+ sector += zone_sectors;
+ continue;
+ }
+
+ if (bio_list_empty(&blist)) {
+ /* This may take a while, so be nice to others */
+ if (num_bios)
+ cond_resched();
+
+ /*
+ * We may need to reset thousands of zones, so let's
+ * not go crazy with the clone allocation.
+ */
+ alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32),
+ NULL);
+ }
+
+ /* Get a clone and change it to a regular reset operation. */
+ clone = bio_list_pop(&blist);
+ clone->bi_opf &= ~REQ_OP_MASK;
+ clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC;
+ clone->bi_iter.bi_sector = sector;
+ clone->bi_iter.bi_size = 0;
+ __map_bio(clone);
+
+ sector += zone_sectors;
+ num_bios++;
+ nr_reset--;
+ }
+
+ WARN_ON_ONCE(!bio_list_empty(&blist));
+ atomic_sub(nr_zones - num_bios, &ci->io->io_count);
+ ci->sector_count = 0;
+
+free_bitmap:
+ bitmap_free(need_reset);
+
+ return sts;
+}
+
+static void __send_zone_reset_all_native(struct clone_info *ci,
+ struct dm_target *ti)
+{
+ unsigned int bios;
+
+ atomic_add(1, &ci->io->io_count);
+ bios = __send_duplicate_bios(ci, ti, 1, NULL);
+ atomic_sub(1 - bios, &ci->io->io_count);
+
+ ci->sector_count = 0;
+}
+
+static blk_status_t __send_zone_reset_all(struct clone_info *ci)
+{
+ struct dm_table *t = ci->map;
+ blk_status_t sts = BLK_STS_OK;
+
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (ti->zone_reset_all_supported) {
+ __send_zone_reset_all_native(ci, ti);
+ continue;
+ }
+
+ sts = __send_zone_reset_all_emulated(ci, ti);
+ if (sts != BLK_STS_OK)
+ break;
+ }
+
+ /* Release the reference that alloc_io() took for submission. */
+ atomic_sub(1, &ci->io->io_count);
+
+ return sts;
+}
+
+#else
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
+{
+ return false;
+}
+static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+{
+ return false;
+}
+static blk_status_t __send_zone_reset_all(struct clone_info *ci)
+{
+ return BLK_STS_NOTSUPP;
+}
+#endif
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
@@ -1775,28 +1943,87 @@ static void dm_split_and_process_bio(struct mapped_device *md,
struct clone_info ci;
struct dm_io *io;
blk_status_t error = BLK_STS_OK;
- bool is_abnormal;
+ bool is_abnormal, need_split;
is_abnormal = is_abnormal_io(bio);
- if (unlikely(is_abnormal)) {
+ if (static_branch_unlikely(&zoned_enabled)) {
+ need_split = is_abnormal || dm_zone_bio_needs_split(bio);
+ } else {
+ need_split = is_abnormal;
+ }
+
+ if (unlikely(need_split)) {
/*
* Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
* otherwise associated queue_limits won't be imposed.
+ * Also split the BIO for mapped devices needing zone append
+ * emulation to ensure that the BIO does not cross zone
+ * boundaries.
*/
bio = bio_split_to_limits(bio);
if (!bio)
return;
}
- init_clone_info(&ci, md, map, bio, is_abnormal);
- io = ci.io;
+ /*
+ * Use the block layer zone write plugging for mapped devices that
+ * need zone append emulation (e.g. dm-crypt).
+ */
+ if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
+ return;
+
+ /* Only support nowait for normal IO */
+ if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
+ /*
+ * Don't support NOWAIT for FLUSH because it may allocate
+ * multiple bios and there's no easy way how to undo the
+ * allocations.
+ */
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ bio_wouldblock_error(bio);
+ return;
+ }
+ io = alloc_io(md, bio, GFP_NOWAIT);
+ if (unlikely(!io)) {
+ /* Unable to do anything without dm_io. */
+ bio_wouldblock_error(bio);
+ return;
+ }
+ } else {
+ io = alloc_io(md, bio, GFP_NOIO);
+ }
+ init_clone_info(&ci, io, map, bio, is_abnormal);
- if (bio->bi_opf & REQ_PREFLUSH) {
+ if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) {
+ /*
+ * The "flush_bypasses_map" is set on targets where it is safe
+ * to skip the map function and submit bios directly to the
+ * underlying block devices - currently, it is set for dm-linear
+ * and dm-stripe.
+ *
+ * If we have just one underlying device (i.e. there is one
+ * linear target or multiple linear targets pointing to the same
+ * device), we can send the flush with data directly to it.
+ */
+ if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
+ struct list_head *devices = dm_table_get_devices(map);
+ if (devices->next == devices->prev)
+ goto send_preflush_with_data;
+ }
+ if (bio->bi_iter.bi_size)
+ io->requeue_flush_with_data = true;
__send_empty_flush(&ci);
/* dm_io_complete submits any data associated with flush */
goto out;
}
+send_preflush_with_data:
+ if (static_branch_unlikely(&zoned_enabled) &&
+ (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
+ error = __send_zone_reset_all(&ci);
+ goto out;
+ }
+
error = __split_and_process_bio(&ci);
if (error || !ci.sector_count)
goto out;
@@ -1833,13 +2060,17 @@ static void dm_submit_bio(struct bio *bio)
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
- blk_opf_t bio_opf = bio->bi_opf;
- map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
+ map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+ dm_device_name(md));
+ bio_io_error(bio);
+ goto out;
+ }
- /* If suspended, or map not yet available, queue this IO for later */
- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
- unlikely(!map)) {
+ /* If suspended, queue this IO for later */
+ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else if (bio->bi_opf & REQ_RAHEAD)
@@ -1851,7 +2082,7 @@ static void dm_submit_bio(struct bio *bio)
dm_split_and_process_bio(md, map, bio);
out:
- dm_put_live_table_bio(md, srcu_idx, bio_opf);
+ dm_put_live_table(md, srcu_idx);
}
static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
@@ -1999,7 +2230,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->dax_dev = NULL;
}
- dm_cleanup_zoned_dev(md);
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -2046,6 +2276,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
+ struct dax_device *dax_dev;
struct mapped_device *md;
void *old_md;
@@ -2090,9 +2321,11 @@ static struct mapped_device *alloc_dev(int minor)
* established. If request-based table is loaded: blk-mq will
* override accordingly.
*/
- md->disk = blk_alloc_disk(md->numa_node_id);
- if (!md->disk)
+ md->disk = blk_alloc_disk(NULL, md->numa_node_id);
+ if (IS_ERR(md->disk)) {
+ md->disk = NULL;
goto bad;
+ }
md->queue = md->disk->queue;
init_waitqueue_head(&md->wait);
@@ -2114,15 +2347,15 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
- if (IS_ENABLED(CONFIG_FS_DAX)) {
- md->dax_dev = alloc_dax(md, &dm_dax_ops);
- if (IS_ERR(md->dax_dev)) {
- md->dax_dev = NULL;
+ dax_dev = alloc_dax(md, &dm_dax_ops);
+ if (IS_ERR(dax_dev)) {
+ if (PTR_ERR(dax_dev) != -EOPNOTSUPP)
goto bad;
- }
- set_dax_nocache(md->dax_dev);
- set_dax_nomc(md->dax_dev);
- if (dax_add_host(md->dax_dev, md->disk))
+ } else {
+ set_dax_nocache(dax_dev);
+ set_dax_nomc(dax_dev);
+ md->dax_dev = dax_dev;
+ if (dax_add_host(dax_dev, md->disk))
goto bad;
}
@@ -2205,21 +2438,35 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
- sector_t size;
+ sector_t size, old_size;
int ret;
lockdep_assert_held(&md->suspend_lock);
size = dm_table_get_size(t);
+ old_size = dm_get_size(md);
+
+ if (!dm_table_supports_size_change(t, old_size, size)) {
+ old_map = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ set_capacity(md->disk, size);
+
+ ret = dm_table_set_restrictions(t, md->queue, limits);
+ if (ret) {
+ set_capacity(md->disk, old_size);
+ old_map = ERR_PTR(ret);
+ goto out;
+ }
+
/*
* Wipe any geometry if the size of the table changed.
*/
- if (size != dm_get_size(md))
+ if (size != old_size)
memset(&md->geometry, 0, sizeof(md->geometry));
- set_capacity(md->disk, size);
-
dm_table_event_callback(t, event_callback, md);
if (dm_table_request_based(t)) {
@@ -2237,10 +2484,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* requests in the queue may refer to bio from the old bioset,
* so you must walk through the queue to unprep.
*/
- if (!md->mempools) {
+ if (!md->mempools)
md->mempools = t->mempools;
- t->mempools = NULL;
- }
+ else
+ dm_free_md_mempools(t->mempools);
} else {
/*
* The md may already have mempools that need changing.
@@ -2249,14 +2496,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
*/
dm_free_md_mempools(md->mempools);
md->mempools = t->mempools;
- t->mempools = NULL;
- }
-
- ret = dm_table_set_restrictions(t, md->queue, limits);
- if (ret) {
- old_map = ERR_PTR(ret);
- goto out;
}
+ t->mempools = NULL;
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, (void *)t);
@@ -2316,12 +2557,6 @@ void dm_unlock_md_type(struct mapped_device *md)
mutex_unlock(&md->type_lock);
}
-void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
-{
- BUG_ON(!mutex_is_locked(&md->type_lock));
- md->type = type;
-}
-
enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
return md->type;
@@ -2342,22 +2577,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
struct table_device *td;
int r;
- switch (type) {
- case DM_TYPE_REQUEST_BASED:
+ WARN_ON_ONCE(type == DM_TYPE_NONE);
+
+ if (type == DM_TYPE_REQUEST_BASED) {
md->disk->fops = &dm_rq_blk_dops;
r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm mapped device");
return r;
}
- break;
- case DM_TYPE_BIO_BASED:
- case DM_TYPE_DAX_BIO_BASED:
- blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
- break;
- case DM_TYPE_NONE:
- WARN_ON_ONCE(true);
- break;
}
r = dm_calculate_queue_limits(t, &limits);
@@ -2550,7 +2778,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
break;
if (signal_pending_state(task_state, current)) {
- r = -EINTR;
+ r = -ERESTARTSYS;
break;
}
@@ -2575,7 +2803,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
break;
if (signal_pending_state(task_state, current)) {
- r = -EINTR;
+ r = -ERESTARTSYS;
break;
}
@@ -2667,7 +2895,7 @@ static int lock_fs(struct mapped_device *md)
WARN_ON(test_bit(DMF_FROZEN, &md->flags));
- r = freeze_bdev(md->disk->part0);
+ r = bdev_freeze(md->disk->part0);
if (!r)
set_bit(DMF_FROZEN, &md->flags);
return r;
@@ -2677,7 +2905,7 @@ static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
- thaw_bdev(md->disk->part0);
+ bdev_thaw(md->disk->part0);
clear_bit(DMF_FROZEN, &md->flags);
}
@@ -2696,7 +2924,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
- int r;
+ int r = 0;
lockdep_assert_held(&md->suspend_lock);
@@ -2748,8 +2976,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md))
+ if (map && dm_request_based(md)) {
dm_stop_queue(md->queue);
+ set_bit(DMF_QUEUE_STOPPED, &md->flags);
+ }
flush_workqueue(md->wq);
@@ -2758,7 +2988,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, task_state);
+ if (map)
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2771,7 +3002,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (r < 0) {
dm_queue_flush(md);
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
@@ -2855,7 +3086,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
* so that mapping of targets can work correctly.
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
@@ -2937,6 +3168,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
static void __dm_internal_resume(struct mapped_device *md)
{
+ int r;
+ struct dm_table *map;
+
BUG_ON(!md->internal_suspend_count);
if (--md->internal_suspend_count)
@@ -2945,12 +3179,23 @@ static void __dm_internal_resume(struct mapped_device *md)
if (dm_suspended_md(md))
goto done; /* resume from nested suspend */
- /*
- * NOTE: existing callers don't need to call dm_table_resume_targets
- * (which may fail -- so best to avoid it for now by passing NULL map)
- */
- (void) __dm_resume(md, NULL);
-
+ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ r = __dm_resume(md, map);
+ if (r) {
+ /*
+ * If a preresume method of some target failed, we are in a
+ * tricky situation. We can't return an error to the caller. We
+ * can't fake success because then the "resume" and
+ * "postsuspend" methods would not be paired correctly, and it
+ * would break various targets, for example it would cause list
+ * corruption in the "origin" target.
+ *
+ * So, we fake normal suspend here, to make sure that the
+ * "resume" and "postsuspend" methods will be paired correctly.
+ */
+ DMERR("Preresume method failed: %d", r);
+ set_bit(DMF_SUSPENDED, &md->flags);
+ }
done:
clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
smp_mb__after_atomic();
@@ -3141,6 +3386,59 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
kfree(pools);
}
+struct dm_blkdev_id {
+ u8 *id;
+ enum blk_unique_id type;
+};
+
+static int __dm_get_unique_id(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_blkdev_id *dm_id = data;
+ const struct block_device_operations *fops = dev->bdev->bd_disk->fops;
+
+ if (!fops->get_unique_id)
+ return 0;
+
+ return fops->get_unique_id(dev->bdev->bd_disk, dm_id->id, dm_id->type);
+}
+
+/*
+ * Allow access to get_unique_id() for the first device returning a
+ * non-zero result. Reasonable use expects all devices to have the
+ * same unique id.
+ */
+static int dm_blk_get_unique_id(struct gendisk *disk, u8 *id,
+ enum blk_unique_id type)
+{
+ struct mapped_device *md = disk->private_data;
+ struct dm_table *table;
+ struct dm_target *ti;
+ int ret = 0, srcu_idx;
+
+ struct dm_blkdev_id dm_id = {
+ .id = id,
+ .type = type,
+ };
+
+ table = dm_get_live_table(md, &srcu_idx);
+ if (!table || !dm_table_get_size(table))
+ goto out;
+
+ /* We only support devices that have a single target */
+ if (table->num_targets != 1)
+ goto out;
+ ti = dm_table_get_target(table, 0);
+
+ if (!ti->type->iterate_devices)
+ goto out;
+
+ ret = ti->type->iterate_devices(ti, __dm_get_unique_id, &dm_id);
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+}
+
struct dm_pr {
u64 old_key;
u64 new_key;
@@ -3368,10 +3666,13 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
+ /* Not a real ioctl, but targets must not interpret non-DM ioctls */
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward);
if (r < 0)
goto out;
+ WARN_ON_ONCE(!forward);
ops = bdev->bd_disk->fops->pr_ops;
if (ops && ops->pr_clear)
@@ -3466,6 +3767,7 @@ static const struct block_device_operations dm_blk_dops = {
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
.report_zones = dm_blk_report_zones,
+ .get_unique_id = dm_blk_get_unique_id,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
@@ -3475,6 +3777,7 @@ static const struct block_device_operations dm_rq_blk_dops = {
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
+ .get_unique_id = dm_blk_get_unique_id,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
@@ -3504,5 +3807,5 @@ module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
MODULE_DESCRIPTION(DM_NAME " driver");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");