author    Linus Torvalds <torvalds@linux-foundation.org>    2016-05-17 15:29:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-05-17 15:29:49 -0700
commit    a4d1dbed0e27030b3c3ca2d1d5c33a1b45bc53d2 (patch)
tree      6f92002ba36efbee3adc1e7b2a0d4b0621c1f1a6 /block
parent    c2e7b207058d4ff6a9010430763fb561f307eb67 (diff)
parent    b3a834b1596ac668df206aa2bb1f191c31f5f5e4 (diff)
Merge branch 'for-4.7/core' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
 "This is the core block IO changes for this merge window. Nothing
  earth shattering in here, it's mostly just fixes. In detail:

   - Fix for a long standing issue where wrong ordering in blk-mq
     caused order_to_size() to spew a warning. From Bart.

   - Async discard support from Christoph. Basically just splitting
     our sync interface into a submit + wait part.

   - Add a cleaner interface for flagging whether a device has a write
     back cache or not. We've previously overloaded blk_queue_flush()
     with this, but let's make it more explicit. Drivers cleaned up
     and updated in the drivers pull request. From me.

   - Fix for a double check for whether IO accounting is enabled or
     not. From Michael Callahan.

   - Fix for the async discard from Mike Snitzer, reinstating the
     early EOPNOTSUPP return if the device doesn't support discards.

   - Also from Mike, export bio_inc_remaining() so dm can drop its
     private copy of it.

   - From Ming Lin, add support for passing in an offset for request
     payloads.

   - Tag function export from Sagi, which will be used in NVMe in the
     drivers pull.

   - Two blktrace related fixes from Shaohua.

   - Propagate NOMERGE flag when making a request from a bio, also
     from Shaohua.

   - An optimization to not parse cgroup paths in blk-throttle, if we
     don't need to. From Shaohua"

* 'for-4.7/core' of git://git.kernel.dk/linux-block:
  blk-mq: fix undefined behaviour in order_to_size()
  blk-throttle: don't parse cgroup path if trace isn't enabled
  blktrace: add missed mask name
  blktrace: delete garbage for message trace
  block: make bio_inc_remaining() interface accessible again
  block: reinstate early return of -EOPNOTSUPP from blkdev_issue_discard
  block: Minor blk_account_io_start usage cleanup
  block: add __blkdev_issue_discard
  block: remove struct bio_batch
  block: copy NOMERGE flag from bio to request
  block: add ability to flag write back caching on a device
  blk-mq: Export tagset iter function
  block: add offset in blk_add_request_payload()
  writeback: Fix performance regression in wb_over_bg_thresh()
Diffstat (limited to 'block')
-rw-r--r--  block/bio.c           11
-rw-r--r--  block/blk-core.c       5
-rw-r--r--  block/blk-lib.c      178
-rw-r--r--  block/blk-mq-tag.c    12
-rw-r--r--  block/blk-mq.c         5
-rw-r--r--  block/blk-settings.c  26
-rw-r--r--  block/blk-sysfs.c     39
-rw-r--r--  block/blk-throttle.c   5

8 files changed, 148 insertions(+), 133 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 807d25e466ec..0e4aa42bc30d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
bio_endio(__bio_chain_endio(bio));
}
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
- bio_set_flag(bio, BIO_CHAIN);
- smp_mb__before_atomic();
- atomic_inc(&bio->__bi_remaining);
-}
-
/**
* bio_chain - chain bio completions
* @bio: the target bio
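
The helper is not deleted outright: it moves to include/linux/bio.h so that dm can drop its private copy. A minimal sketch of the pattern the export enables — taking an extra completion reference before fanning work out — where queue_extra_work() is a hypothetical driver-side helper:

#include <linux/bio.h>

/*
 * Take an extra completion reference on @bio before handing it to
 * asynchronous work. Each bio_inc_remaining() pairs with one extra
 * bio_endio(); the bio only completes when __bi_remaining hits zero.
 */
static void fan_out_bio(struct bio *bio)
{
        bio_inc_remaining(bio);         /* now visible via <linux/bio.h> */
        queue_extra_work(bio);          /* hypothetical async path; it must
                                         * call bio_endio(bio) when done */
        bio_endio(bio);                 /* drop the submitter's reference */
}
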
diff --git a/block/blk-core.c b/block/blk-core.c
index b60537b2c35b..c50227796a26 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
* blk_add_request_payload - add a payload to a request
* @rq: request to update
* @page: page backing the payload
+ * @offset: offset in page
* @len: length of the payload.
*
* This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
* discard requests should ever use it.
*/
void blk_add_request_payload(struct request *rq, struct page *page,
- unsigned int len)
+ int offset, unsigned int len)
{
struct bio *bio = rq->bio;
bio->bi_io_vec->bv_page = page;
- bio->bi_io_vec->bv_offset = 0;
+ bio->bi_io_vec->bv_offset = offset;
bio->bi_io_vec->bv_len = len;
bio->bi_iter.bi_size = len;
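
With the new @offset parameter, a payload no longer has to start at the beginning of a page. A hedged sketch of a caller attaching a kmalloc()-ed buffer; the wrapper name and driver context are illustrative, not part of this diff:

#include <linux/blkdev.h>
#include <linux/mm.h>

/*
 * Attach a payload that lives somewhere inside a page, e.g. a buffer
 * from kmalloc(). virt_to_page()/offset_in_page() recover the page
 * and the in-page offset that the new parameter carries.
 */
static void attach_payload(struct request *rq, void *buf, unsigned int len)
{
        blk_add_request_payload(rq, virt_to_page(buf),
                                offset_in_page(buf), len);
}
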
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9ebf65379556..23d7f301a196 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,82 +9,46 @@
#include "blk.h"
-struct bio_batch {
- atomic_t done;
- int error;
- struct completion *wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+ gfp_t gfp)
{
- struct bio_batch *bb = bio->bi_private;
+ struct bio *new = bio_alloc(gfp, nr_pages);
+
+ if (bio) {
+ bio_chain(bio, new);
+ submit_bio(rw, bio);
+ }
- if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
- bb->error = bio->bi_error;
- if (atomic_dec_and_test(&bb->done))
- complete(bb->wait);
- bio_put(bio);
+ return new;
}
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- * Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
- int type = REQ_WRITE | REQ_DISCARD;
+ struct bio *bio = *biop;
unsigned int granularity;
int alignment;
- struct bio_batch bb;
- struct bio *bio;
- int ret = 0;
- struct blk_plug plug;
if (!q)
return -ENXIO;
-
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+ return -EOPNOTSUPP;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
- if (flags & BLKDEV_DISCARD_SECURE) {
- if (!blk_queue_secdiscard(q))
- return -EOPNOTSUPP;
- type |= REQ_SECURE;
- }
-
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
-
- blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
sector_t end_sect, tmp;
- bio = bio_alloc(gfp_mask, 1);
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
/* Make sure bi_size doesn't overflow */
req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
- /*
+ /**
* If splitting a request, and the next starting sector would be
* misaligned, stop the discard at the previous aligned sector.
*/
@@ -98,18 +62,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
req_sects = end_sect - sector;
}
+ bio = next_bio(bio, type, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
- bio->bi_private = &bb;
bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
- atomic_inc(&bb.done);
- submit_bio(type, bio);
-
/*
* We can loop for a long time in here, if someone does
* full device discards (like mkfs). Be nice and allow
@@ -118,14 +78,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*/
cond_resched();
}
- blk_finish_plug(&plug);
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
+ *biop = bio;
+ return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ int type = REQ_WRITE | REQ_DISCARD;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int ret;
+
+ if (flags & BLKDEV_DISCARD_SECURE)
+ type |= REQ_SECURE;
+
+ blk_start_plug(&plug);
+ ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+ &bio);
+ if (!ret && bio) {
+ ret = submit_bio_wait(type, bio);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+ blk_finish_plug(&plug);
- if (bb.error)
- return bb.error;
return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask,
struct page *page)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_write_same_sectors;
- struct bio_batch bb;
- struct bio *bio;
+ struct bio *bio = NULL;
int ret = 0;
if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
max_write_same_sectors = UINT_MAX >> 9;
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
-
while (nr_sects) {
- bio = bio_alloc(gfp_mask, 1);
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
+ bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
- bio->bi_private = &bb;
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
-
- atomic_inc(&bb.done);
- submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
}
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
-
- if (bb.error)
- return bb.error;
- return ret;
+ if (bio)
+ ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+ return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
- struct bio *bio;
- struct bio_batch bb;
+ struct bio *bio = NULL;
unsigned int sz;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
- ret = 0;
while (nr_sects != 0) {
- bio = bio_alloc(gfp_mask,
- min(nr_sects, (sector_t)BIO_MAX_PAGES));
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
+ bio = next_bio(bio, WRITE,
+ min(nr_sects, (sector_t)BIO_MAX_PAGES),
+ gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
- bio->bi_end_io = bio_batch_end_io;
- bio->bi_private = &bb;
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if (ret < (sz << 9))
break;
}
- ret = 0;
- atomic_inc(&bb.done);
- submit_bio(WRITE, bio);
}
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
-
- if (bb.error)
- return bb.error;
- return ret;
+ if (bio)
+ return submit_bio_wait(WRITE, bio);
+ return 0;
}
/**
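
The split lets callers overlap discard submission with other work. A hedged sketch of an asynchronous caller, closely following what blkdev_issue_discard() itself now does (the wrapper name is illustrative, and plugging is omitted):

#include <linux/blkdev.h>

/*
 * Issue a discard without blocking on every bio.
 * __blkdev_issue_discard() chains and submits all but the last bio,
 * returning the final unsubmitted one through @biop; the caller
 * chooses when to wait.
 */
static int discard_async(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects)
{
        struct bio *bio = NULL;
        int ret;

        ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
                                     REQ_WRITE | REQ_DISCARD, &bio);
        if (ret || !bio)
                return ret;

        /* ... earlier bios are already in flight; do other work ... */

        return submit_bio_wait(REQ_WRITE | REQ_DISCARD, bio);
}
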
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index abdbb47405cb..2fd04286f103 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -474,6 +474,18 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+ busy_tag_iter_fn *fn, void *priv)
+{
+ int i;
+
+ for (i = 0; i < tagset->nr_hw_queues; i++) {
+ if (tagset->tags && tagset->tags[i])
+ blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+ }
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
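
blk_mq_tagset_busy_iter() is exported for drivers (NVMe, in the companion drivers pull) that need to visit every busy request across a whole tag set rather than a single queue. A hedged sketch of a caller; the cancel semantics are illustrative of that use case, not mandated by the interface:

#include <linux/blk-mq.h>
#include <linux/errno.h>

/* Invoked once for each started request on every hardware queue. */
static void cancel_one_rq(struct request *req, void *data, bool reserved)
{
        blk_mq_complete_request(req, -EIO);     /* fail it back to the core */
}

static void cancel_all_requests(struct blk_mq_tag_set *set)
{
        blk_mq_tagset_busy_iter(set, cancel_one_rq, NULL);
}
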
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1699baf39b78..7df9c9263b21 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1122,8 +1122,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
- if (blk_do_io_stat(rq))
- blk_account_io_start(rq, 1);
+ blk_account_io_start(rq, 1);
}
static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1496,7 +1495,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
int to_do;
void *p;
- while (left < order_to_size(this_order - 1) && this_order)
+ while (this_order && left < order_to_size(this_order - 1))
this_order--;
do {
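
The reordering matters because && short-circuits left to right: previously, order_to_size(this_order - 1) was still evaluated when this_order was 0, and since the helper is a plain shift, the wrapped argument turned it into a shift by UINT_MAX — the undefined behaviour this fix removes. A sketch restating the guard, assuming the shift-based helper as it appears in blk-mq.c:

#include <linux/mm.h>   /* PAGE_SIZE */

static inline size_t order_to_size(unsigned int order)
{
        return (size_t)PAGE_SIZE << order;      /* UB if order >= bit width */
}

/*
 * this_order is tested first, so when it reaches 0 the && stops
 * before evaluating order_to_size(0 - 1) == order_to_size(UINT_MAX):
 *
 *      while (this_order && left < order_to_size(this_order - 1))
 *              this_order--;
 */
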
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 331e4eee0dda..c903bee43cf8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q: the request queue for the device
+ * @wc: write back cache on or off
+ * @fua: device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+ spin_lock_irq(q->queue_lock);
+ if (wc) {
+ queue_flag_set(QUEUE_FLAG_WC, q);
+ q->flush_flags = REQ_FLUSH;
+ } else
+ queue_flag_clear(QUEUE_FLAG_WC, q);
+ if (fua) {
+ if (wc)
+ q->flush_flags |= REQ_FUA;
+ queue_flag_set(QUEUE_FLAG_FUA, q);
+ } else
+ queue_flag_clear(QUEUE_FLAG_FUA, q);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
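
Drivers that used to encode cache behaviour by calling blk_queue_flush() can now state it directly. A minimal sketch of the new call in a driver's queue-setup path (the probe context is assumed):

#include <linux/blkdev.h>

static void setup_cache_flags(struct request_queue *q)
{
        /* previously: blk_queue_flush(q, REQ_FLUSH | REQ_FUA); */
        blk_queue_write_cache(q, true, true);   /* write back cache + FUA */
}
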
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 995b58d46ed1..99205965f559 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ return sprintf(page, "write back\n");
+
+ return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ int set = -1;
+
+ if (!strncmp(page, "write back", 10))
+ set = 1;
+ else if (!strncmp(page, "write through", 13) ||
+ !strncmp(page, "none", 4))
+ set = 0;
+
+ if (set == -1)
+ return -EINVAL;
+
+ spin_lock_irq(q->queue_lock);
+ if (set)
+ queue_flag_set(QUEUE_FLAG_WC, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_WC, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return count;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
.store = queue_poll_store,
};
+static struct queue_sysfs_entry queue_wc_entry = {
+ .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wc_show,
+ .store = queue_wc_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
&queue_iostats_entry.attr,
&queue_random_entry.attr,
&queue_poll_entry.attr,
+ &queue_wc_entry.attr,
NULL,
};
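
The new attribute appears as /sys/block/<dev>/queue/write_cache and accepts the strings it reports ("write back", "write through"), plus "none" as an alias for write through. A hedged userspace sketch that reads it; the device name sda is an assumption:

#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f = fopen("/sys/block/sda/queue/write_cache", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("cache mode: %s", buf);  /* "write back" or "write through" */
        fclose(f);
        return 0;
}
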
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2149a1ddbacf..47a3e540631a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -211,15 +211,14 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
*
* The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
* throtl_grp; otherwise, just "throtl".
- *
- * TODO: this should be made a function and name formatting should happen
- * after testing whether blktrace is enabled.
*/
#define throtl_log(sq, fmt, args...) do { \
struct throtl_grp *__tg = sq_to_tg((sq)); \
struct throtl_data *__td = sq_to_td((sq)); \
\
(void)__td; \
+ if (likely(!blk_trace_note_message_enabled(__td->queue))) \
+ break; \
if ((__tg)) { \
char __pbuf[128]; \
\
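
The point of the change is to pay for message formatting only when someone is listening: blk_trace_note_message_enabled() is a cheap flag test, while building the cgroup path into __pbuf on every throttle event is not. The same guard pattern, sketched as a standalone macro (the macro name is illustrative; the helpers are the ones used in the diff above):

#include <linux/blktrace_api.h>

#define my_log(q, fmt, args...) do {                                    \
        if (likely(!blk_trace_note_message_enabled(q)))                 \
                break;  /* tracing off: skip all formatting work */     \
        blk_add_trace_msg((q), fmt, ##args);                            \
} while (0)
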