-rw-r--r--   drivers/md/dm-bufio.c        15
-rw-r--r--   drivers/md/dm-era-target.c    1
-rw-r--r--   drivers/md/dm-mpath.c         2
-rw-r--r--   drivers/md/dm-raid.c          6
-rw-r--r--   drivers/md/dm-table.c        43
5 files changed, 28 insertions, 39 deletions
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 33bb074d6941..b8ac591aaaa7 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 		buffers = c->minimum_buffers;
 
 	*limit_buffers = buffers;
-	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+	*threshold_buffers = mult_frac(buffers,
+				       DM_BUFIO_WRITEBACK_PERCENT, 100);
 }
 
 /*
@@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
 	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
 	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
 
-	mem = (__u64)((totalram_pages - totalhigh_pages) *
-		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
 
 	if (mem > ULONG_MAX)
 		mem = ULONG_MAX;
 
 #ifdef CONFIG_MMU
-	/*
-	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
-	 * in fs/proc/internal.h
-	 */
-	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
-		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
+		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
 #endif
 
 	dm_bufio_default_cache_size = mem;
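
A note on the mult_frac() conversions above: the old expressions computed the full
product first (e.g. buffers * DM_BUFIO_WRITEBACK_PERCENT), which can overflow the
type before the division by 100 brings the value back into range; with enough RAM,
the totalram_pages arithmetic overflows the same way. mult_frac() sidesteps this by
splitting the operand into quotient and remainder with respect to the divisor. A
minimal userspace sketch of the same idea follows; the helper name mult_frac_ul and
the fixed types are ours for illustration, not the kernel's type-generic macro:

    #include <stdio.h>

    /*
     * Compute x * numer / denom without letting the intermediate product
     * overflow, by splitting x into (x / denom) and (x % denom).
     */
    static unsigned long mult_frac_ul(unsigned long x, unsigned long numer,
                                      unsigned long denom)
    {
            unsigned long quot = x / denom;
            unsigned long rem  = x % denom;

            return quot * numer + rem * numer / denom;
    }

    int main(void)
    {
            /* On 32-bit, pages * 2 would wrap; the split form does not. */
            unsigned long pages = 0xffffffffUL;

            printf("%lu\n", mult_frac_ul(pages, 2, 100));
            return 0;
    }

The remaining caveat is that quot * numer must itself fit in the type, which holds
here because the multiplier is a small percentage.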
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index ba84b8d62cd0..73a5c198113a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1513,7 +1513,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->flush_supported = true;
 
 	ti->num_discard_bios = 1;
-	ti->discards_supported = true;
 
 	era->callbacks.congested_fn = era_is_congested;
 	dm_table_add_target_callbacks(ti->table, &era->callbacks);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 204606bef9e2..c8faa2b85842 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	if (IS_ERR(clone)) {
 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 		bool queue_dying = blk_queue_dying(q);
-		DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
-			    PTR_ERR(clone), queue_dying ? " (path offline)" : "");
 		if (queue_dying) {
 			atomic_inc(&m->pg_init_in_progress);
 			activate_or_offline_path(pgpath);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 366c625b9591..6319d846e0ad 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2887,9 +2887,6 @@ static void configure_discard_support(struct raid_set *rs)
 	bool raid456;
 	struct dm_target *ti = rs->ti;
 
-	/* Assume discards not supported until after checks below. */
-	ti->discards_supported = false;
-
 	/*
 	 * XXX: RAID level 4,5,6 require zeroing for safety.
 	 */
@@ -2914,9 +2911,6 @@ static void configure_discard_support(struct raid_set *rs)
 		}
 	}
 
-	/* All RAID members properly support discards */
-	ti->discards_supported = true;
-
 	/*
 	 * RAID1 and RAID10 personalities require bio splitting,
 	 * RAID0/4/5/6 don't and process large discard bios properly.
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 433bd3f3014c..88130b5d95f9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
 	return true;
 }
 
-
-static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
+static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+				      sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && blk_queue_discard(q);
+	return q && !blk_queue_discard(q);
 }
 
 static bool dm_table_supports_discards(struct dm_table *t)
static bool dm_table_supports_discards(struct dm_table *t)
@@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
 	struct dm_target *ti;
 	unsigned i;
 
-	/*
-	 * Unless any target used by the table set discards_supported,
-	 * require at least one underlying device to support discards.
-	 * t->devices includes internal dm devices such as mirror logs
-	 * so we need to use iterate_devices here, which targets
-	 * supporting discard selectively must provide.
-	 */
 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 		ti = dm_table_get_target(t, i);
 
 		if (!ti->num_discard_bios)
-			continue;
-
-		if (ti->discards_supported)
-			return true;
+			return false;
 
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
-			return true;
+		/*
+		 * Either the target provides discard support (as implied by setting
+		 * 'discards_supported') or it relies on _all_ data devices having
+		 * discard support.
+		 */
+		if (!ti->discards_supported &&
+		    (!ti->type->iterate_devices ||
+		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
+			return false;
 	}
 
-	return false;
+	return true;
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
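
Why the predicate had to be inverted in the hunk above: iterate_devices() reports
whether the callback returns true for any device, i.e. it behaves as an "any"
combinator. The old code asked "is any device discard-capable?", which passes even
when other members of the table cannot discard. To require that all data devices
support discards, the loop instead searches for a counterexample, using
all(capable) == !any(!capable). A toy model of that inversion follows; the
iterator, callbacks and device array are made up purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    typedef bool (*dev_cb)(int dev);

    /* Toy stand-in for ti->type->iterate_devices(): true as soon as the
     * callback matches any device -- an "any" combinator. */
    static bool iterate_devices(const int *devs, int n, dev_cb cb)
    {
            for (int i = 0; i < n; i++)
                    if (cb(devs[i]))
                            return true;
            return false;
    }

    static bool discard_capable(int dev)     { return dev != 0; }
    static bool not_discard_capable(int dev) { return dev == 0; }

    int main(void)
    {
            int devs[] = { 1, 1, 0 };   /* one member lacks discard */

            /* Old check: any capable device -> reports support (wrongly). */
            printf("any capable: %d\n",
                   iterate_devices(devs, 3, discard_capable));

            /* New check: no incapable device -> all must be capable. */
            printf("all capable: %d\n",
                   !iterate_devices(devs, 3, not_discard_capable));
            return 0;
    }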
@@ -1806,9 +1801,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (!dm_table_supports_discards(t))
+	if (!dm_table_supports_discards(t)) {
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
-	else
+		/* Must also clear discard limits... */
+		q->limits.max_discard_sectors = 0;
+		q->limits.max_hw_discard_sectors = 0;
+		q->limits.discard_granularity = 0;
+		q->limits.discard_alignment = 0;
+		q->limits.discard_misaligned = 0;
+	} else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
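
The limits clearing in the last hunk matters because QUEUE_FLAG_DISCARD is not the
only signal: the discard fields in q->limits were stacked up from the component
devices and remain visible through sysfs, so without zeroing them a table that no
longer supports discards would still advertise, for example, a nonzero
discard_max_bytes. A quick userspace check of the visible effect; the device name
dm-0 is just an example:

    #include <stdio.h>

    int main(void)
    {
            /* discard_max_bytes mirrors max_discard_sectors; it should
             * read 0 once the table does not support discards. */
            FILE *f = fopen("/sys/block/dm-0/queue/discard_max_bytes", "r");
            char buf[32];

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("discard_max_bytes: %s", buf);
            fclose(f);
            return 0;
    }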