Diffstat (limited to 'drivers/md/dm-delay.c')
| -rw-r--r-- | drivers/md/dm-delay.c | 186 |
1 file changed, 140 insertions(+), 46 deletions(-)
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 7433525e5985..4bb6553278c7 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -13,11 +13,15 @@
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
 
 #include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "delay"
 
+#define SLEEP_SHIFT 3
+
 struct delay_class {
 	struct dm_dev *dev;
 	sector_t start;
@@ -27,11 +31,14 @@ struct delay_class {
 
 struct delay_c {
 	struct timer_list delay_timer;
-	struct mutex timer_lock;
+	struct mutex process_bios_lock; /* hold while removing bios to be processed from list */
+	spinlock_t delayed_bios_lock; /* hold on all accesses to delayed_bios list */
 	struct workqueue_struct *kdelayd_wq;
 	struct work_struct flush_expired_bios;
 	struct list_head delayed_bios;
-	atomic_t may_delay;
+	struct task_struct *worker;
+	unsigned int worker_sleep_us;
+	bool may_delay;
 
 	struct delay_class read;
 	struct delay_class write;
@@ -47,23 +54,21 @@ struct dm_delay_info {
 	unsigned long expires;
 };
 
-static DEFINE_MUTEX(delayed_bios_lock);
-
 static void handle_delayed_timer(struct timer_list *t)
 {
-	struct delay_c *dc = from_timer(dc, t, delay_timer);
+	struct delay_c *dc = timer_container_of(dc, t, delay_timer);
 
 	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
 }
 
 static void queue_timeout(struct delay_c *dc, unsigned long expires)
 {
-	mutex_lock(&dc->timer_lock);
-
-	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
-		mod_timer(&dc->delay_timer, expires);
+	timer_reduce(&dc->delay_timer, expires);
+}
 
-	mutex_unlock(&dc->timer_lock);
+static inline bool delay_is_fast(struct delay_c *dc)
+{
+	return !!dc->worker;
 }
 
 static void flush_bios(struct bio *bio)
@@ -78,36 +83,69 @@ static void flush_bios(struct bio *bio)
 	}
 }
 
-static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
+static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
 {
 	struct dm_delay_info *delayed, *next;
+	struct bio_list flush_bio_list;
+	LIST_HEAD(local_list);
 	unsigned long next_expires = 0;
-	unsigned long start_timer = 0;
-	struct bio_list flush_bios = { };
-
-	mutex_lock(&delayed_bios_lock);
-	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
+	bool start_timer = false;
+	bio_list_init(&flush_bio_list);
+
+	mutex_lock(&dc->process_bios_lock);
+	spin_lock(&dc->delayed_bios_lock);
+	list_replace_init(&dc->delayed_bios, &local_list);
+	spin_unlock(&dc->delayed_bios_lock);
+	list_for_each_entry_safe(delayed, next, &local_list, list) {
+		cond_resched();
 		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
 			struct bio *bio = dm_bio_from_per_bio_data(delayed,
 						sizeof(struct dm_delay_info));
 			list_del(&delayed->list);
-			bio_list_add(&flush_bios, bio);
+			bio_list_add(&flush_bio_list, bio);
 			delayed->class->ops--;
 			continue;
 		}
 
-		if (!start_timer) {
-			start_timer = 1;
-			next_expires = delayed->expires;
-		} else
-			next_expires = min(next_expires, delayed->expires);
+		if (!delay_is_fast(dc)) {
+			if (!start_timer) {
+				start_timer = true;
+				next_expires = delayed->expires;
+			} else {
+				next_expires = min(next_expires, delayed->expires);
+			}
+		}
 	}
-	mutex_unlock(&delayed_bios_lock);
+	spin_lock(&dc->delayed_bios_lock);
+	list_splice(&local_list, &dc->delayed_bios);
+	spin_unlock(&dc->delayed_bios_lock);
+	mutex_unlock(&dc->process_bios_lock);
 
 	if (start_timer)
 		queue_timeout(dc, next_expires);
 
-	return bio_list_get(&flush_bios);
+	flush_bios(bio_list_get(&flush_bio_list));
+}
+
+static int flush_worker_fn(void *data)
+{
+	struct delay_c *dc = data;
+
+	while (!kthread_should_stop()) {
+		flush_delayed_bios(dc, false);
+		spin_lock(&dc->delayed_bios_lock);
+		if (unlikely(list_empty(&dc->delayed_bios))) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock(&dc->delayed_bios_lock);
+			schedule();
+		} else {
+			spin_unlock(&dc->delayed_bios_lock);
+			fsleep(dc->worker_sleep_us);
+			cond_resched();
+		}
+	}
+
+	return 0;
 }
 
 static void flush_expired_bios(struct work_struct *work)
@@ -115,15 +153,17 @@ static void flush_expired_bios(struct work_struct *work)
 	struct delay_c *dc;
 
 	dc = container_of(work, struct delay_c, flush_expired_bios);
-	flush_bios(flush_delayed_bios(dc, 0));
+	flush_delayed_bios(dc, false);
 }
 
 static void delay_dtr(struct dm_target *ti)
 {
 	struct delay_c *dc = ti->private;
 
-	if (dc->kdelayd_wq)
+	if (dc->kdelayd_wq) {
+		timer_shutdown_sync(&dc->delay_timer);
 		destroy_workqueue(dc->kdelayd_wq);
+	}
 
 	if (dc->read.dev)
 		dm_put_device(ti, dc->read.dev);
@@ -131,8 +171,10 @@ static void delay_dtr(struct dm_target *ti)
 		dm_put_device(ti, dc->write.dev);
 	if (dc->flush.dev)
 		dm_put_device(ti, dc->flush.dev);
+	if (dc->worker)
+		kthread_stop(dc->worker);
 
-	mutex_destroy(&dc->timer_lock);
+	mutex_destroy(&dc->process_bios_lock);
 
 	kfree(dc);
 }
@@ -175,6 +217,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct delay_c *dc;
 	int ret;
+	unsigned int max_delay, min_delay;
 
 	if (argc != 3 && argc != 6 && argc != 9) {
 		ti->error = "Requires exactly 3, 6 or 9 arguments";
@@ -188,16 +231,16 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	ti->private = dc;
 
-	timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
-	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
 	INIT_LIST_HEAD(&dc->delayed_bios);
-	mutex_init(&dc->timer_lock);
-	atomic_set(&dc->may_delay, 1);
+	mutex_init(&dc->process_bios_lock);
+	spin_lock_init(&dc->delayed_bios_lock);
+	dc->may_delay = true;
 	dc->argc = argc;
 
 	ret = delay_class_ctr(ti, &dc->read, argv);
 	if (ret)
		goto bad;
+	min_delay = max_delay = dc->read.delay;
 
 	if (argc == 3) {
 		ret = delay_class_ctr(ti, &dc->write, argv);
@@ -212,6 +255,9 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ret = delay_class_ctr(ti, &dc->write, argv + 3);
 	if (ret)
 		goto bad;
+	max_delay = max(max_delay, dc->write.delay);
+	min_delay = min_not_zero(min_delay, dc->write.delay);
+
 	if (argc == 6) {
 		ret = delay_class_ctr(ti, &dc->flush, argv + 3);
 		if (ret)
@@ -222,13 +268,34 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ret = delay_class_ctr(ti, &dc->flush, argv + 6);
 	if (ret)
 		goto bad;
+	max_delay = max(max_delay, dc->flush.delay);
+	min_delay = min_not_zero(min_delay, dc->flush.delay);
 
 out:
-	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
-	if (!dc->kdelayd_wq) {
-		ret = -EINVAL;
-		DMERR("Couldn't start kdelayd");
-		goto bad;
+	if (max_delay < 50) {
+		if (min_delay >> SLEEP_SHIFT)
+			dc->worker_sleep_us = 1000;
+		else
+			dc->worker_sleep_us = (min_delay * 1000) >> SLEEP_SHIFT;
+		/*
+		 * In case of small requested delays, use kthread instead of
+		 * timers and workqueue to achieve better latency.
+		 */
+		dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker");
+		if (IS_ERR(dc->worker)) {
+			ret = PTR_ERR(dc->worker);
+			dc->worker = NULL;
+			goto bad;
+		}
+	} else {
+		timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
+		INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+		dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+		if (!dc->kdelayd_wq) {
+			ret = -EINVAL;
+			DMERR("Couldn't start kdelayd");
+			goto bad;
+		}
 	}
 
 	ti->num_flush_bios = 1;
@@ -247,7 +314,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
 	struct dm_delay_info *delayed;
 	unsigned long expires = 0;
 
-	if (!c->delay || !atomic_read(&dc->may_delay))
+	if (!c->delay)
 		return DM_MAPIO_REMAPPED;
 
 	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@@ -255,12 +322,19 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
 	delayed->context = dc;
 	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
 
-	mutex_lock(&delayed_bios_lock);
+	spin_lock(&dc->delayed_bios_lock);
+	if (unlikely(!dc->may_delay)) {
+		spin_unlock(&dc->delayed_bios_lock);
+		return DM_MAPIO_REMAPPED;
+	}
 	c->ops++;
 	list_add_tail(&delayed->list, &dc->delayed_bios);
-	mutex_unlock(&delayed_bios_lock);
+	spin_unlock(&dc->delayed_bios_lock);
 
-	queue_timeout(dc, expires);
+	if (delay_is_fast(dc))
+		wake_up_process(dc->worker);
+	else
+		queue_timeout(dc, expires);
 
 	return DM_MAPIO_SUBMITTED;
 }
@@ -269,16 +343,20 @@ static void delay_presuspend(struct dm_target *ti)
 {
 	struct delay_c *dc = ti->private;
 
-	atomic_set(&dc->may_delay, 0);
-	del_timer_sync(&dc->delay_timer);
-	flush_bios(flush_delayed_bios(dc, 1));
+	spin_lock(&dc->delayed_bios_lock);
+	dc->may_delay = false;
+	spin_unlock(&dc->delayed_bios_lock);
+
+	if (!delay_is_fast(dc))
+		timer_delete(&dc->delay_timer);
+	flush_delayed_bios(dc, true);
 }
 
 static void delay_resume(struct dm_target *ti)
 {
 	struct delay_c *dc = ti->private;
 
-	atomic_set(&dc->may_delay, 1);
+	dc->may_delay = true;
 }
 
 static int delay_map(struct dm_target *ti, struct bio *bio)
@@ -302,6 +380,21 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
 	return delay_bio(dc, c, bio);
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static int delay_report_zones(struct dm_target *ti,
+		struct dm_report_zones_args *args, unsigned int nr_zones)
+{
+	struct delay_c *dc = ti->private;
+	struct delay_class *c = &dc->read;
+
+	return dm_report_zones(c->dev->bdev, c->start,
+			c->start + dm_target_offset(ti, args->next_sector),
+			args, nr_zones);
+}
+#else
+#define delay_report_zones NULL
+#endif
+
 #define DMEMIT_DELAY_CLASS(c) \
 	DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
 
@@ -356,12 +449,13 @@ out:
 
 static struct target_type delay_target = {
 	.name = "delay",
-	.version = {1, 3, 0},
-	.features = DM_TARGET_PASSES_INTEGRITY,
+	.version = {1, 5, 0},
+	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
 	.module = THIS_MODULE,
 	.ctr = delay_ctr,
 	.dtr = delay_dtr,
 	.map = delay_map,
+	.report_zones = delay_report_zones,
 	.presuspend = delay_presuspend,
 	.resume = delay_resume,
 	.status = delay_status,
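For reference, the sleep-interval arithmetic in the delay_ctr() hunk above can be checked with a small userspace sketch (not kernel code; the helper name worker_sleep_us_for() and the sample delays are illustrative): the kthread path is only taken when the largest configured delay is below 50 ms, and the kthread then polls roughly every min_delay/8 ms, capped at 1 ms.

/*
 * Userspace illustration of the polling-interval calculation from the
 * patch: SLEEP_SHIFT is 3, so the worker sleeps (min_delay * 1000) >> 3
 * microseconds per iteration, saturating at 1000 us once min_delay >= 8 ms.
 */
#include <stdio.h>

#define SLEEP_SHIFT 3

static unsigned int worker_sleep_us_for(unsigned int min_delay_ms)
{
	/* mirrors the branch in delay_ctr() shown in the diff */
	if (min_delay_ms >> SLEEP_SHIFT)
		return 1000;
	return (min_delay_ms * 1000) >> SLEEP_SHIFT;
}

int main(void)
{
	unsigned int delays[] = { 1, 5, 8, 20, 49 };

	for (unsigned int i = 0; i < sizeof(delays) / sizeof(delays[0]); i++)
		printf("min_delay=%u ms -> worker sleeps %u us per poll\n",
		       delays[i], worker_sleep_us_for(delays[i]));
	return 0;
}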
