author    Joe Thornber <ejt@redhat.com>        2014-05-09 15:59:38 +0100
committer Mike Snitzer <snitzer@redhat.com>    2014-05-14 16:11:37 -0400
commit    85ad643b7e7e52d37620fb272a9fd577a8095647 (patch)
tree      e7599aa49a532c6ae05ac99a31bf901341ce2b45 /drivers/md
parent    8d07e8a5f5bc7b90f755d9b427ea930024f4c986 (diff)
dm thin: add timeout to stop out-of-data-space mode holding IO forever
If the pool runs out of data space, dm-thin can be configured to either
error IOs that would trigger provisioning, or hold those IOs until the
pool is resized.  Unfortunately, holding IOs until the pool is resized
can result in a cascade of tasks hitting the hung_task_timeout, which may
render the system unavailable.

Add a fixed timeout so IOs can only be held for a maximum of 60 seconds.
If LVM is going to resize a thin-pool that is out of data space, it needs
to be prompt about it.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # 3.14+
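For readers less familiar with the kernel's deferred-work API, the sketch
below shows the delayed_work pattern the patch builds on
(INIT_DELAYED_WORK, queue/schedule_delayed_work, cancel_delayed_work),
reduced to a standalone module.  It is illustrative only: the names
demo_timeout_work, demo_timeout_fn and DEMO_TIMEOUT are invented for this
sketch and do not appear in dm-thin.c, and the printk stands in for
dm-thin's switch to PM_READ_ONLY.

/*
 * Illustrative sketch of a one-shot delayed_work timeout.
 * Not part of the patch; names here are made up.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_TIMEOUT (HZ * 60)	/* fire once, 60 seconds after arming */

static struct delayed_work demo_timeout_work;

static void demo_timeout_fn(struct work_struct *ws)
{
	/* In dm-thin this is where the pool is degraded to read-only
	 * if it is still out of data space when the timer fires. */
	pr_info("timeout expired: take the fallback action here\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_timeout_work, demo_timeout_fn);
	/* Arm the timeout; dm-thin does this when entering
	 * PM_OUT_OF_DATA_SPACE mode. */
	schedule_delayed_work(&demo_timeout_work, DEMO_TIMEOUT);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Disarm on teardown; dm-thin cancels in pool_postsuspend(). */
	cancel_delayed_work_sync(&demo_timeout_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In the patch itself the timeout is queued on the pool's own workqueue, so
the cancel_delayed_work() added to pool_postsuspend() does not need to be
the _sync variant: the flush_workqueue(pool->wq) call that follows drains
any instance that has already been queued or started running.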
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-thin.c  21
1 file changed, 21 insertions, 0 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 15f748ba6754..2e71de8e0048 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,6 +27,7 @@
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT (HZ * 60)
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -175,6 +176,7 @@ struct pool {
struct workqueue_struct *wq;
struct work_struct worker;
struct delayed_work waker;
+ struct delayed_work no_space_timeout;
unsigned long last_commit_jiffies;
unsigned ref_count;
@@ -1590,6 +1592,20 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
+/*
+ * We're holding onto IO to allow userland time to react. After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+ struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+ no_space_timeout);
+
+ if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+ set_pool_mode(pool, PM_READ_ONLY);
+}
+
/*----------------------------------------------------------------*/
struct noflush_work {
@@ -1715,6 +1731,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_discard = process_discard;
pool->process_prepared_mapping = process_prepared_mapping;
pool->process_prepared_discard = process_prepared_discard_passdown;
+
+ if (!pool->pf.error_if_no_space)
+ queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
break;
case PM_WRITE:
@@ -2100,6 +2119,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_WORK(&pool->worker, do_worker);
INIT_DELAYED_WORK(&pool->waker, do_waker);
+ INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
spin_lock_init(&pool->lock);
bio_list_init(&pool->deferred_flush_bios);
INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2662,6 +2682,7 @@ static void pool_postsuspend(struct dm_target *ti)
struct pool *pool = pt->pool;
cancel_delayed_work(&pool->waker);
+ cancel_delayed_work(&pool->no_space_timeout);
flush_workqueue(pool->wq);
(void) commit(pool);
}