Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--	block/blk-sysfs.c	1318
1 file changed, 659 insertions(+), 659 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 590d1ef2f961..8684c57498cc 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,24 +9,31 @@
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/blktrace_api.h>
-#include <linux/blk-mq.h>
-#include <linux/blk-cgroup.h>
+#include <linux/debugfs.h>
 
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
+#include "blk-mq-sched.h"
+#include "blk-rq-qos.h"
 #include "blk-wbt.h"
+#include "blk-cgroup.h"
+#include "blk-throttle.h"
 
 struct queue_sysfs_entry {
 	struct attribute attr;
-	ssize_t (*show)(struct request_queue *, char *);
-	ssize_t (*store)(struct request_queue *, const char *, size_t);
+	ssize_t (*show)(struct gendisk *disk, char *page);
+	ssize_t (*show_limit)(struct gendisk *disk, char *page);
+
+	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
+	int (*store_limit)(struct gendisk *disk, const char *page,
+			size_t count, struct queue_limits *lim);
 };
 
 static ssize_t queue_var_show(unsigned long var, char *page)
 {
-	return sprintf(page, "%lu\n", var);
+	return sysfs_emit(page, "%lu\n", var);
 }
 
 static ssize_t
@@ -44,270 +51,332 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
-static ssize_t queue_var_store64(s64 *var, const char *page)
+static ssize_t queue_requests_show(struct gendisk *disk, char *page)
 {
-	int err;
-	s64 v;
-
-	err = kstrtos64(page, 10, &v);
-	if (err < 0)
-		return err;
-
-	*var = v;
-	return 0;
-}
+	ssize_t ret;
 
-static ssize_t queue_requests_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->nr_requests, (page));
+	mutex_lock(&disk->queue->elevator_lock);
+	ret = queue_var_show(disk->queue->nr_requests, page);
+	mutex_unlock(&disk->queue->elevator_lock);
+	return ret;
 }
 
 static ssize_t
-queue_requests_store(struct request_queue *q, const char *page, size_t count)
+queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 {
+	struct request_queue *q = disk->queue;
+	struct blk_mq_tag_set *set = q->tag_set;
+	struct elevator_tags *et = NULL;
+	unsigned int memflags;
 	unsigned long nr;
-	int ret, err;
-
-	if (!queue_is_mq(q))
-		return -EINVAL;
+	int ret;
 
 	ret = queue_var_store(&nr, page, count);
 	if (ret < 0)
 		return ret;
 
+	/*
+	 * Serialize updating nr_requests with concurrent queue_requests_store()
+	 * and switching elevator.
+	 */
+	down_write(&set->update_nr_hwq_lock);
+
+	if (nr == q->nr_requests)
+		goto unlock;
+
 	if (nr < BLKDEV_MIN_RQ)
 		nr = BLKDEV_MIN_RQ;
 
-	err = blk_mq_update_nr_requests(q, nr);
-	if (err)
-		return err;
+	/*
+	 * Switching elevator is protected by update_nr_hwq_lock:
+	 * - read lock is held from elevator sysfs attribute;
+	 * - write lock is held from updating nr_hw_queues;
+	 * Hence it's safe to access q->elevator here with write lock held.
+	 */
+	if (nr <= set->reserved_tags ||
+	    (q->elevator && nr > MAX_SCHED_RQ) ||
+	    (!q->elevator && nr > set->queue_depth)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (!blk_mq_is_shared_tags(set->flags) && q->elevator &&
+	    nr > q->elevator->et->nr_requests) {
+		/*
+		 * Tags will grow, allocate memory before freezing queue to
+		 * prevent deadlock.
+		 */
+		et = blk_mq_alloc_sched_tags(set, q->nr_hw_queues, nr);
+		if (!et) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
+	memflags = blk_mq_freeze_queue(q);
+	mutex_lock(&q->elevator_lock);
+	et = blk_mq_update_nr_requests(q, et, nr);
+	mutex_unlock(&q->elevator_lock);
+	blk_mq_unfreeze_queue(q, memflags);
+
+	if (et)
+		blk_mq_free_sched_tags(et, set);
+
+unlock:
+	up_write(&set->update_nr_hwq_lock);
 	return ret;
 }
 
-static ssize_t queue_ra_show(struct request_queue *q, char *page)
+static ssize_t queue_ra_show(struct gendisk *disk, char *page)
 {
-	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
-					(PAGE_SHIFT - 10);
+	ssize_t ret;
 
-	return queue_var_show(ra_kb, (page));
+	mutex_lock(&disk->queue->limits_lock);
+	ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
+	mutex_unlock(&disk->queue->limits_lock);
+
+	return ret;
 }
 
 static ssize_t
-queue_ra_store(struct request_queue *q, const char *page, size_t count)
+queue_ra_store(struct gendisk *disk, const char *page, size_t count)
 {
 	unsigned long ra_kb;
-	ssize_t ret = queue_var_store(&ra_kb, page, count);
+	ssize_t ret;
+	struct request_queue *q = disk->queue;
 
+	ret = queue_var_store(&ra_kb, page, count);
 	if (ret < 0)
 		return ret;
-
-	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+	/*
+	 * The ->ra_pages change below is protected by ->limits_lock because it
+	 * is usually calculated from the queue limits by
+	 * queue_limits_commit_update().
+	 *
+	 * bdi->ra_pages reads are not serialized against bdi->ra_pages writes.
+	 * Use WRITE_ONCE() to write bdi->ra_pages once.
+	 */
+	mutex_lock(&q->limits_lock);
+	WRITE_ONCE(disk->bdi->ra_pages, ra_kb >> (PAGE_SHIFT - 10));
+	mutex_unlock(&q->limits_lock);
 
 	return ret;
 }
 
-static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-{
-	int max_sectors_kb = queue_max_sectors(q) >> 1;
-
-	return queue_var_show(max_sectors_kb, (page));
+#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
+{									\
+	return queue_var_show(disk->queue->limits._field, page);	\
+}
+
+QUEUE_SYSFS_LIMIT_SHOW(max_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
+QUEUE_SYSFS_LIMIT_SHOW(max_write_streams)
+QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
+QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
+QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
+QUEUE_SYSFS_LIMIT_SHOW(io_min)
+QUEUE_SYSFS_LIMIT_SHOW(io_opt)
+QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
+QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
+QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
+QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
+QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
+QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)
+
+#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
+{									\
+	return sysfs_emit(page, "%llu\n",				\
+		(unsigned long long)disk->queue->limits._field <<	\
+			SECTOR_SHIFT);					\
+}
+
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_wzeroes_unmap_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_wzeroes_unmap_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)
+
+#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
+{									\
+	return queue_var_show(disk->queue->limits._field >> 1, page);	\
 }
 
-static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_max_segments(q), (page));
-}
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
 
-static ssize_t queue_max_discard_segments_show(struct request_queue *q,
-		char *page)
-{
-	return queue_var_show(queue_max_discard_segments(q), (page));
+#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
+{									\
+	return sysfs_emit(page, "%d\n", _val);				\
 }
 
-static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->limits.max_integrity_segments, (page));
-}
+/* deprecated fields */
+QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
+QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
+QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)
 
-static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+static int queue_max_discard_sectors_store(struct gendisk *disk,
+		const char *page, size_t count, struct queue_limits *lim)
 {
-	return queue_var_show(queue_max_segment_size(q), (page));
-}
+	unsigned long max_discard_bytes;
+	ssize_t ret;
 
-static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_logical_block_size(q), page);
-}
+	ret = queue_var_store(&max_discard_bytes, page, count);
+	if (ret < 0)
+		return ret;
 
-static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_physical_block_size(q), page);
-}
+	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
+		return -EINVAL;
 
-static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->limits.chunk_sectors, page);
-}
+	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
+		return -EINVAL;
 
-static ssize_t queue_io_min_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_io_min(q), page);
+	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
+	return 0;
 }
 
-static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
+static int queue_max_wzeroes_unmap_sectors_store(struct gendisk *disk,
+		const char *page, size_t count, struct queue_limits *lim)
 {
-	return queue_var_show(queue_io_opt(q), page);
-}
+	unsigned long max_zeroes_bytes, max_hw_zeroes_bytes;
+	ssize_t ret;
 
-static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->limits.discard_granularity, page);
-}
+	ret = queue_var_store(&max_zeroes_bytes, page, count);
+	if (ret < 0)
+		return ret;
 
-static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
-{
+	max_hw_zeroes_bytes = lim->max_hw_wzeroes_unmap_sectors << SECTOR_SHIFT;
+	if (max_zeroes_bytes != 0 && max_zeroes_bytes != max_hw_zeroes_bytes)
+		return -EINVAL;
 
-	return sprintf(page, "%llu\n",
-		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
+	lim->max_user_wzeroes_unmap_sectors = max_zeroes_bytes >> SECTOR_SHIFT;
+	return 0;
 }
 
-static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
+static int
+queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
+		struct queue_limits *lim)
 {
-	return sprintf(page, "%llu\n",
-		       (unsigned long long)q->limits.max_discard_sectors << 9);
+	unsigned long max_sectors_kb;
+	ssize_t ret;
+
+	ret = queue_var_store(&max_sectors_kb, page, count);
+	if (ret < 0)
+		return ret;
+
+	lim->max_user_sectors = max_sectors_kb << 1;
+	return 0;
 }
 
-static ssize_t queue_discard_max_store(struct request_queue *q,
-				       const char *page, size_t count)
+static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
+		size_t count, struct queue_limits *lim, blk_features_t feature)
 {
-	unsigned long max_discard;
-	ssize_t ret = queue_var_store(&max_discard, page, count);
+	unsigned long val;
+	ssize_t ret;
 
+	ret = queue_var_store(&val, page, count);
 	if (ret < 0)
 		return ret;
 
-	if (max_discard & (q->limits.discard_granularity - 1))
-		return -EINVAL;
+	if (val)
+		lim->features |= feature;
+	else
+		lim->features &= ~feature;
+	return 0;
+}
 
-	max_discard >>= 9;
-	if (max_discard > UINT_MAX)
-		return -EINVAL;
+#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
+{									\
+	return sysfs_emit(page, "%u\n",					\
+		!!(disk->queue->limits.features & _feature));		\
+}									\
+static int queue_##_name##_store(struct gendisk *disk,			\
+		const char *page, size_t count, struct queue_limits *lim) \
+{									\
+	return queue_feature_store(disk, page, count, lim, _feature);	\
+}
 
-	if (max_discard > q->limits.max_hw_discard_sectors)
-		max_discard = q->limits.max_hw_discard_sectors;
+QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
+QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
+QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
+QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);
 
-	q->limits.max_discard_sectors = max_discard;
-	return ret;
+#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
+{									\
+	return sysfs_emit(page, "%u\n",					\
+		!!(disk->queue->limits.features & _feature));		\
 }
 
-static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
+QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
+QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);
+
+static ssize_t queue_poll_show(struct gendisk *disk, char *page)
 {
-	return queue_var_show(0, page);
+	if (queue_is_mq(disk->queue))
+		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
+
+	return sysfs_emit(page, "%u\n",
+		!!(disk->queue->limits.features & BLK_FEAT_POLL));
 }
 
-static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
+static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
 {
-	return sprintf(page, "%llu\n",
-		(unsigned long long)q->limits.max_write_same_sectors << 9);
+	if (blk_queue_is_zoned(disk->queue))
+		return sysfs_emit(page, "host-managed\n");
+	return sysfs_emit(page, "none\n");
 }
 
-static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
+static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
 {
-	return sprintf(page, "%llu\n",
-		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
+	return queue_var_show(disk_nr_zones(disk), page);
 }
 
-static ssize_t
-queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
 {
-	unsigned long max_sectors_kb,
-		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-			page_kb = 1 << (PAGE_SHIFT - 10);
-	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
-
-	if (ret < 0)
-		return ret;
-
-	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
-					 q->limits.max_dev_sectors >> 1);
-
-	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
-		return -EINVAL;
-
-	spin_lock_irq(&q->queue_lock);
-	q->limits.max_sectors = max_sectors_kb << 1;
-	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
-	spin_unlock_irq(&q->queue_lock);
-
-	return ret;
+	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
 }
 
-static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+static int queue_iostats_passthrough_store(struct gendisk *disk,
+		const char *page, size_t count, struct queue_limits *lim)
 {
-	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
-
-	return queue_var_show(max_hw_sectors_kb, (page));
-}
+	unsigned long ios;
+	ssize_t ret;
 
-#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
-static ssize_t								\
-queue_show_##name(struct request_queue *q, char *page)			\
-{									\
-	int bit;							\
-	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
-	return queue_var_show(neg ? !bit : bit, page);			\
-}									\
-static ssize_t								\
-queue_store_##name(struct request_queue *q, const char *page, size_t count) \
-{									\
-	unsigned long val;						\
-	ssize_t ret;							\
-	ret = queue_var_store(&val, page, count);			\
-	if (ret < 0)							\
-		return ret;						\
-	if (neg)							\
-		val = !val;						\
-									\
-	if (val)							\
-		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
-	else								\
-		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
-	return ret;							\
-}
-
-QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-#undef QUEUE_SYSFS_BIT_FNS
-
-static ssize_t queue_zoned_show(struct request_queue *q, char *page)
-{
-	switch (blk_queue_zoned_model(q)) {
-	case BLK_ZONED_HA:
-		return sprintf(page, "host-aware\n");
-	case BLK_ZONED_HM:
-		return sprintf(page, "host-managed\n");
-	default:
-		return sprintf(page, "none\n");
-	}
-}
+	ret = queue_var_store(&ios, page, count);
+	if (ret < 0)
+		return ret;
 
-static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_nr_zones(q), page);
+	if (ios)
+		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
+	else
+		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
+	return 0;
 }
 
-static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
 {
-	return queue_var_show((blk_queue_nomerges(q) << 1) |
-			       blk_queue_noxmerges(q), page);
+	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
+			       blk_queue_noxmerges(disk->queue), page);
 }
 
-static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
 				    size_t count)
 {
 	unsigned long nm;
+	struct request_queue *q = disk->queue;
 	ssize_t ret = queue_var_store(&nm, page, count);
 
 	if (ret < 0)
@@ -323,25 +392,32 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	return ret;
 }
 
-static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
 {
-	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
-	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
+	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
+	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);
 
 	return queue_var_show(set << force, page);
 }
 
 static ssize_t
-queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
 {
 	ssize_t ret = -EINVAL;
 #ifdef CONFIG_SMP
+	struct request_queue *q = disk->queue;
 	unsigned long val;
 
 	ret = queue_var_store(&val, page, count);
 	if (ret < 0)
 		return ret;
 
+	/*
+	 * Here we update two queue flags each using atomic bitops, although
+	 * updating two flags isn't atomic it should be harmless as those flags
+	 * are accessed individually using atomic test_bit operation. So we
+	 * don't grab any lock while updating these flags.
+	 */
 	if (val == 2) {
 		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
@@ -356,75 +432,41 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
-static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
-{
-	int val;
-
-	if (q->poll_nsec == -1)
-		val = -1;
-	else
-		val = q->poll_nsec / 1000;
-
-	return sprintf(page, "%d\n", val);
-}
-
-static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
+static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
 				      size_t count)
 {
-	int err, val;
-
-	if (!q->mq_ops || !q->mq_ops->poll)
-		return -EINVAL;
-
-	err = kstrtoint(page, 10, &val);
-	if (err < 0)
-		return err;
-
-	if (val == -1)
-		q->poll_nsec = -1;
-	else
-		q->poll_nsec = val * 1000;
-
 	return count;
 }
 
-static ssize_t queue_poll_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
-}
-
-static ssize_t queue_poll_store(struct request_queue *q, const char *page,
+static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
 				size_t count)
 {
-	unsigned long poll_on;
-	ssize_t ret;
-
-	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
-	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
-		return -EINVAL;
-
-	ret = queue_var_store(&poll_on, page, count);
-	if (ret < 0)
-		return ret;
+	ssize_t ret = count;
+	struct request_queue *q = disk->queue;
 
-	if (poll_on)
-		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
-	else
-		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+	if (!(q->limits.features & BLK_FEAT_POLL)) {
+		ret = -EINVAL;
+		goto out;
+	}
 
+	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
+	pr_info_ratelimited("please use driver specific parameters instead.\n");
+out:
 	return ret;
 }
 
-static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
+static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
 {
-	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
+	return sysfs_emit(page, "%u\n",
+			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
 }
 
-static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
+static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
 				  size_t count)
 {
 	unsigned int val;
 	int err;
+	struct request_queue *q = disk->queue;
 
 	err = kstrtou32(page, 10, &val);
 	if (err || val == 0)
@@ -435,20 +477,170 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
 	return count;
 }
 
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+static ssize_t queue_wc_show(struct gendisk *disk, char *page)
 {
-	if (!wbt_rq_qos(q))
+	if (blk_queue_write_cache(disk->queue))
+		return sysfs_emit(page, "write back\n");
+	return sysfs_emit(page, "write through\n");
+}
+
+static int queue_wc_store(struct gendisk *disk, const char *page,
+		size_t count, struct queue_limits *lim)
+{
+	bool disable;
+
+	if (!strncmp(page, "write back", 10)) {
+		disable = false;
+	} else if (!strncmp(page, "write through", 13) ||
+		   !strncmp(page, "none", 4)) {
+		disable = true;
+	} else {
 		return -EINVAL;
+	}
+
+	if (disable)
+		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
+	else
+		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
+	return 0;
+}
+
+#define QUEUE_RO_ENTRY(_prefix, _name)				\
+static struct queue_sysfs_entry _prefix##_entry = {		\
+	.attr	= { .name = _name, .mode = 0444 },		\
+	.show	= _prefix##_show,				\
+};
+
+#define QUEUE_RW_ENTRY(_prefix, _name)				\
+static struct queue_sysfs_entry _prefix##_entry = {		\
+	.attr	= { .name = _name, .mode = 0644 },		\
+	.show	= _prefix##_show,				\
+	.store	= _prefix##_store,				\
+};
+
+#define QUEUE_LIM_RO_ENTRY(_prefix, _name)			\
+static struct queue_sysfs_entry _prefix##_entry = {		\
+	.attr		= { .name = _name, .mode = 0444 },	\
+	.show_limit	= _prefix##_show,			\
+}
+
+#define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
+static struct queue_sysfs_entry _prefix##_entry = {		\
+	.attr		= { .name = _name, .mode = 0644 },	\
+	.show_limit	= _prefix##_show,			\
+	.store_limit	= _prefix##_store,			\
+}
+
+QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
+QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
+QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
+QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
+QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
+QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
+QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams");
+QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity");
+QUEUE_RW_ENTRY(elv_iosched, "scheduler");
+
+QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
+QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
+QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
+QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
+QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");
+
+QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
+QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
+QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
+QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
+QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
+
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
+		"atomic_write_boundary_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
+
+QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_max_hw_wzeroes_unmap_sectors,
+		"write_zeroes_unmap_max_hw_bytes");
+QUEUE_LIM_RW_ENTRY(queue_max_wzeroes_unmap_sectors,
+		"write_zeroes_unmap_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
+
+QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
+QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
+QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
+QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");
+
+QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
+QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
+QUEUE_RW_ENTRY(queue_poll, "io_poll");
+QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
+QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
+QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
+QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
+QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
+QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
+QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");
+
+/* legacy alias for logical_block_size: */
+static struct queue_sysfs_entry queue_hw_sector_size_entry = {
+	.attr		= {.name = "hw_sector_size", .mode = 0444 },
+	.show_limit	= queue_logical_block_size_show,
+};
+
+QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
+QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
+QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
+QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");
+
+#ifdef CONFIG_BLK_WBT
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+	int err;
+	s64 v;
+
+	err = kstrtos64(page, 10, &v);
+	if (err < 0)
+		return err;
+
+	*var = v;
+	return 0;
+}
+
+static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
+{
+	ssize_t ret;
+	struct request_queue *q = disk->queue;
+
+	mutex_lock(&disk->rqos_state_mutex);
+	if (!wbt_rq_qos(q)) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+	if (wbt_disabled(q)) {
+		ret = sysfs_emit(page, "0\n");
+		goto out;
+	}
+
+	ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+out:
+	mutex_unlock(&disk->rqos_state_mutex);
+	return ret;
 }
 
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
 				  size_t count)
 {
+	struct request_queue *q = disk->queue;
 	struct rq_qos *rqos;
 	ssize_t ret;
 	s64 val;
+	unsigned int memflags;
 
 	ret = queue_var_store64(&val, page);
 	if (ret < 0)
@@ -456,283 +648,58 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	if (val < -1)
 		return -EINVAL;
 
+	/*
+	 * Ensure that the queue is idled, in case the latency update
+	 * ends up either enabling or disabling wbt completely. We can't
+	 * have IO inflight if that happens.
+	 */
+	memflags = blk_mq_freeze_queue(q);
+
 	rqos = wbt_rq_qos(q);
 	if (!rqos) {
-		ret = wbt_init(q);
+		ret = wbt_init(disk);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
+	ret = count;
 	if (val == -1)
 		val = wbt_default_latency_nsec(q);
 	else if (val >= 0)
 		val *= 1000ULL;
 
-	/*
-	 * Ensure that the queue is idled, in case the latency update
-	 * ends up either enabling or disabling wbt completely. We can't
-	 * have IO inflight if that happens.
-	 */
-	blk_mq_freeze_queue(q);
+	if (wbt_get_min_lat(q) == val)
+		goto out;
+
 	blk_mq_quiesce_queue(q);
 
+	mutex_lock(&disk->rqos_state_mutex);
 	wbt_set_min_lat(q, val);
-	wbt_update_limits(q);
+	mutex_unlock(&disk->rqos_state_mutex);
 
 	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
-
-	return count;
-}
-
-static ssize_t queue_wc_show(struct request_queue *q, char *page)
-{
-	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-		return sprintf(page, "write back\n");
-
-	return sprintf(page, "write through\n");
-}
-
-static ssize_t queue_wc_store(struct request_queue *q, const char *page,
-			      size_t count)
-{
-	int set = -1;
-
-	if (!strncmp(page, "write back", 10))
-		set = 1;
-	else if (!strncmp(page, "write through", 13) ||
-		 !strncmp(page, "none", 4))
-		set = 0;
-
-	if (set == -1)
-		return -EINVAL;
-
-	if (set)
-		blk_queue_flag_set(QUEUE_FLAG_WC, q);
-	else
-		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
-
-	return count;
-}
-
-static ssize_t queue_fua_show(struct request_queue *q, char *page)
-{
-	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
-}
+out:
+	blk_mq_unfreeze_queue(q, memflags);
 
-static ssize_t queue_dax_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_dax(q), page);
+	return ret;
 }
 
-static struct queue_sysfs_entry queue_requests_entry = {
-	.attr = {.name = "nr_requests", .mode = 0644 },
-	.show = queue_requests_show,
-	.store = queue_requests_store,
-};
-
-static struct queue_sysfs_entry queue_ra_entry = {
-	.attr = {.name = "read_ahead_kb", .mode = 0644 },
-	.show = queue_ra_show,
-	.store = queue_ra_store,
-};
-
-static struct queue_sysfs_entry queue_max_sectors_entry = {
-	.attr = {.name = "max_sectors_kb", .mode = 0644 },
-	.show = queue_max_sectors_show,
-	.store = queue_max_sectors_store,
-};
-
-static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
-	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
-	.show = queue_max_hw_sectors_show,
-};
-
-static struct queue_sysfs_entry queue_max_segments_entry = {
-	.attr = {.name = "max_segments", .mode = 0444 },
-	.show = queue_max_segments_show,
-};
-
-static struct queue_sysfs_entry queue_max_discard_segments_entry = {
-	.attr = {.name = "max_discard_segments", .mode = 0444 },
-	.show = queue_max_discard_segments_show,
-};
-
-static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
-	.attr = {.name = "max_integrity_segments", .mode = 0444 },
-	.show = queue_max_integrity_segments_show,
-};
-
-static struct queue_sysfs_entry queue_max_segment_size_entry = {
-	.attr = {.name = "max_segment_size", .mode = 0444 },
-	.show = queue_max_segment_size_show,
-};
-
-static struct queue_sysfs_entry queue_iosched_entry = {
-	.attr = {.name = "scheduler", .mode = 0644 },
-	.show = elv_iosched_show,
-	.store = elv_iosched_store,
-};
-
-static struct queue_sysfs_entry queue_hw_sector_size_entry = {
-	.attr = {.name = "hw_sector_size", .mode = 0444 },
-	.show = queue_logical_block_size_show,
-};
-
-static struct queue_sysfs_entry queue_logical_block_size_entry = {
-	.attr = {.name = "logical_block_size", .mode = 0444 },
-	.show = queue_logical_block_size_show,
-};
-
-static struct queue_sysfs_entry queue_physical_block_size_entry = {
-	.attr = {.name = "physical_block_size", .mode = 0444 },
-	.show = queue_physical_block_size_show,
-};
-
-static struct queue_sysfs_entry queue_chunk_sectors_entry = {
-	.attr = {.name = "chunk_sectors", .mode = 0444 },
-	.show = queue_chunk_sectors_show,
-};
-
-static struct queue_sysfs_entry queue_io_min_entry = {
-	.attr = {.name = "minimum_io_size", .mode = 0444 },
"minimum_io_size", .mode = 0444 }, - .show = queue_io_min_show, -}; - -static struct queue_sysfs_entry queue_io_opt_entry = { - .attr = {.name = "optimal_io_size", .mode = 0444 }, - .show = queue_io_opt_show, -}; - -static struct queue_sysfs_entry queue_discard_granularity_entry = { - .attr = {.name = "discard_granularity", .mode = 0444 }, - .show = queue_discard_granularity_show, -}; - -static struct queue_sysfs_entry queue_discard_max_hw_entry = { - .attr = {.name = "discard_max_hw_bytes", .mode = 0444 }, - .show = queue_discard_max_hw_show, -}; - -static struct queue_sysfs_entry queue_discard_max_entry = { - .attr = {.name = "discard_max_bytes", .mode = 0644 }, - .show = queue_discard_max_show, - .store = queue_discard_max_store, -}; - -static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { - .attr = {.name = "discard_zeroes_data", .mode = 0444 }, - .show = queue_discard_zeroes_data_show, -}; - -static struct queue_sysfs_entry queue_write_same_max_entry = { - .attr = {.name = "write_same_max_bytes", .mode = 0444 }, - .show = queue_write_same_max_show, -}; - -static struct queue_sysfs_entry queue_write_zeroes_max_entry = { - .attr = {.name = "write_zeroes_max_bytes", .mode = 0444 }, - .show = queue_write_zeroes_max_show, -}; - -static struct queue_sysfs_entry queue_nonrot_entry = { - .attr = {.name = "rotational", .mode = 0644 }, - .show = queue_show_nonrot, - .store = queue_store_nonrot, -}; - -static struct queue_sysfs_entry queue_zoned_entry = { - .attr = {.name = "zoned", .mode = 0444 }, - .show = queue_zoned_show, -}; - -static struct queue_sysfs_entry queue_nr_zones_entry = { - .attr = {.name = "nr_zones", .mode = 0444 }, - .show = queue_nr_zones_show, -}; - -static struct queue_sysfs_entry queue_nomerges_entry = { - .attr = {.name = "nomerges", .mode = 0644 }, - .show = queue_nomerges_show, - .store = queue_nomerges_store, -}; - -static struct queue_sysfs_entry queue_rq_affinity_entry = { - .attr = {.name = "rq_affinity", .mode = 0644 }, - .show = queue_rq_affinity_show, - .store = queue_rq_affinity_store, -}; - -static struct queue_sysfs_entry queue_iostats_entry = { - .attr = {.name = "iostats", .mode = 0644 }, - .show = queue_show_iostats, - .store = queue_store_iostats, -}; - -static struct queue_sysfs_entry queue_random_entry = { - .attr = {.name = "add_random", .mode = 0644 }, - .show = queue_show_random, - .store = queue_store_random, -}; - -static struct queue_sysfs_entry queue_poll_entry = { - .attr = {.name = "io_poll", .mode = 0644 }, - .show = queue_poll_show, - .store = queue_poll_store, -}; - -static struct queue_sysfs_entry queue_poll_delay_entry = { - .attr = {.name = "io_poll_delay", .mode = 0644 }, - .show = queue_poll_delay_show, - .store = queue_poll_delay_store, -}; - -static struct queue_sysfs_entry queue_wc_entry = { - .attr = {.name = "write_cache", .mode = 0644 }, - .show = queue_wc_show, - .store = queue_wc_store, -}; - -static struct queue_sysfs_entry queue_fua_entry = { - .attr = {.name = "fua", .mode = 0444 }, - .show = queue_fua_show, -}; - -static struct queue_sysfs_entry queue_dax_entry = { - .attr = {.name = "dax", .mode = 0444 }, - .show = queue_dax_show, -}; - -static struct queue_sysfs_entry queue_io_timeout_entry = { - .attr = {.name = "io_timeout", .mode = 0644 }, - .show = queue_io_timeout_show, - .store = queue_io_timeout_store, -}; - -static struct queue_sysfs_entry queue_wb_lat_entry = { - .attr = {.name = "wbt_lat_usec", .mode = 0644 }, - .show = queue_wb_lat_show, - .store = queue_wb_lat_store, -}; - -#ifdef 
-static struct queue_sysfs_entry throtl_sample_time_entry = {
-	.attr = {.name = "throttle_sample_time", .mode = 0644 },
-	.show = blk_throtl_sample_time_show,
-	.store = blk_throtl_sample_time_store,
-};
+QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
 #endif
 
-static struct attribute *default_attrs[] = {
-	&queue_requests_entry.attr,
-	&queue_ra_entry.attr,
+/* Common attributes for bio-based and request-based queues. */
+static struct attribute *queue_attrs[] = {
+	/*
+	 * Attributes which are protected with q->limits_lock.
+	 */
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
 	&queue_max_discard_segments_entry.attr,
 	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
-	&queue_iosched_entry.attr,
+	&queue_max_write_streams_entry.attr,
+	&queue_write_stream_granularity_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
 	&queue_physical_block_size_entry.attr,
@@ -740,51 +707,125 @@ static struct attribute *default_attrs[] = {
 	&queue_io_min_entry.attr,
 	&queue_io_opt_entry.attr,
 	&queue_discard_granularity_entry.attr,
-	&queue_discard_max_entry.attr,
-	&queue_discard_max_hw_entry.attr,
-	&queue_discard_zeroes_data_entry.attr,
-	&queue_write_same_max_entry.attr,
-	&queue_write_zeroes_max_entry.attr,
-	&queue_nonrot_entry.attr,
+	&queue_max_discard_sectors_entry.attr,
+	&queue_max_hw_discard_sectors_entry.attr,
+	&queue_atomic_write_max_sectors_entry.attr,
+	&queue_atomic_write_boundary_sectors_entry.attr,
+	&queue_atomic_write_unit_min_entry.attr,
+	&queue_atomic_write_unit_max_entry.attr,
+	&queue_max_write_zeroes_sectors_entry.attr,
+	&queue_max_hw_wzeroes_unmap_sectors_entry.attr,
+	&queue_max_wzeroes_unmap_sectors_entry.attr,
+	&queue_max_zone_append_sectors_entry.attr,
+	&queue_zone_write_granularity_entry.attr,
+	&queue_rotational_entry.attr,
 	&queue_zoned_entry.attr,
-	&queue_nr_zones_entry.attr,
-	&queue_nomerges_entry.attr,
-	&queue_rq_affinity_entry.attr,
+	&queue_max_open_zones_entry.attr,
+	&queue_max_active_zones_entry.attr,
+	&queue_iostats_passthrough_entry.attr,
 	&queue_iostats_entry.attr,
-	&queue_random_entry.attr,
-	&queue_poll_entry.attr,
+	&queue_stable_writes_entry.attr,
+	&queue_add_random_entry.attr,
 	&queue_wc_entry.attr,
 	&queue_fua_entry.attr,
 	&queue_dax_entry.attr,
-	&queue_wb_lat_entry.attr,
+	&queue_virt_boundary_mask_entry.attr,
+	&queue_dma_alignment_entry.attr,
+	&queue_ra_entry.attr,
+
+	/*
+	 * Attributes which don't require locking.
+	 */
+	&queue_discard_zeroes_data_entry.attr,
+	&queue_write_same_max_entry.attr,
+	&queue_nr_zones_entry.attr,
+	&queue_nomerges_entry.attr,
+	&queue_poll_entry.attr,
 	&queue_poll_delay_entry.attr,
-	&queue_io_timeout_entry.attr,
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	&throtl_sample_time_entry.attr,
+
+	NULL,
+};
+
+/* Request-based queue attributes that are not relevant for bio-based queues. */
+static struct attribute *blk_mq_queue_attrs[] = {
+	/*
+	 * Attributes which require some form of locking other than
+	 * q->sysfs_lock.
+	 */
+	&elv_iosched_entry.attr,
+	&queue_requests_entry.attr,
+#ifdef CONFIG_BLK_WBT
+	&queue_wb_lat_entry.attr,
 #endif
+	/*
+	 * Attributes which don't require locking.
+	 */
+	&queue_rq_affinity_entry.attr,
+	&queue_io_timeout_entry.attr,
+
 	NULL,
 };
 
+static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
+				int n)
+{
+	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
+	struct request_queue *q = disk->queue;
+
+	if ((attr == &queue_max_open_zones_entry.attr ||
+	     attr == &queue_max_active_zones_entry.attr) &&
+	    !blk_queue_is_zoned(q))
+		return 0;
+
+	return attr->mode;
+}
+
+static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
+					 struct attribute *attr, int n)
+{
+	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
+	struct request_queue *q = disk->queue;
+
+	if (!queue_is_mq(q))
+		return 0;
+
+	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
+		return 0;
+
+	return attr->mode;
+}
+
+static struct attribute_group queue_attr_group = {
+	.attrs = queue_attrs,
+	.is_visible = queue_attr_visible,
+};
+
+static struct attribute_group blk_mq_queue_attr_group = {
+	.attrs = blk_mq_queue_attrs,
+	.is_visible = blk_mq_queue_attr_visible,
+};
+
 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
 
 static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
-	ssize_t res;
+	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 
-	if (!entry->show)
+	if (!entry->show && !entry->show_limit)
 		return -EIO;
-	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dying(q)) {
-		mutex_unlock(&q->sysfs_lock);
-		return -ENOENT;
+
+	if (entry->show_limit) {
+		ssize_t res;
+
+		mutex_lock(&disk->queue->limits_lock);
+		res = entry->show_limit(disk, page);
+		mutex_unlock(&disk->queue->limits_lock);
+		return res;
 	}
-	res = entry->show(q, page);
-	mutex_unlock(&q->sysfs_lock);
-	return res;
+
+	return entry->show(disk, page);
 }
 
 static ssize_t
@@ -792,95 +833,30 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		 const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q;
-	ssize_t res;
+	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
+	struct request_queue *q = disk->queue;
 
-	if (!entry->store)
+	if (!entry->store_limit && !entry->store)
 		return -EIO;
 
-	q = container_of(kobj, struct request_queue, kobj);
-	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dying(q)) {
-		mutex_unlock(&q->sysfs_lock);
-		return -ENOENT;
-	}
-	res = entry->store(q, page, length);
-	mutex_unlock(&q->sysfs_lock);
-	return res;
-}
-
-static void blk_free_queue_rcu(struct rcu_head *rcu_head)
-{
-	struct request_queue *q = container_of(rcu_head, struct request_queue,
-					       rcu_head);
-	kmem_cache_free(blk_requestq_cachep, q);
-}
+	if (entry->store_limit) {
+		ssize_t res;
 
-/**
- * __blk_release_queue - release a request queue when it is no longer needed
- * @work: pointer to the release_work member of the request queue to be released
- *
- * Description:
- *     blk_release_queue is the counterpart of blk_init_queue(). It should be
- *     called when a request queue is being released; typically when a block
- *     device is being de-registered. Its primary task it to free the queue
- *     itself.
- *
- * Notes:
- *     The low level driver must have finished any outstanding requests first
- *     via blk_cleanup_queue().
- *
- *     Although blk_release_queue() may be called with preemption disabled,
- *     __blk_release_queue() may sleep.
- */
-static void __blk_release_queue(struct work_struct *work)
-{
-	struct request_queue *q = container_of(work, typeof(*q), release_work);
+		struct queue_limits lim = queue_limits_start_update(q);
 
-	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
-		blk_stat_remove_callback(q, q->poll_cb);
-	blk_stat_free_callback(q->poll_cb);
+		res = entry->store_limit(disk, page, length, &lim);
+		if (res < 0) {
+			queue_limits_cancel_update(q);
+			return res;
+		}
 
-	if (!blk_queue_dead(q)) {
-		/*
-		 * Last reference was dropped without having called
-		 * blk_cleanup_queue().
-		 */
-		WARN_ONCE(blk_queue_init_done(q),
-			  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
-			  q);
-		blk_exit_queue(q);
+		res = queue_limits_commit_update_frozen(q, &lim);
+		if (res)
+			return res;
+		return length;
 	}
 
-	WARN(blk_queue_root_blkg(q),
-	     "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
-	     q);
-
-	blk_free_queue_stats(q->stats);
-
-	blk_queue_free_zone_bitmaps(q);
-
-	if (queue_is_mq(q))
-		blk_mq_release(q);
-
-	blk_trace_shutdown(q);
-
-	if (queue_is_mq(q))
-		blk_mq_debugfs_unregister(q);
-
-	bioset_exit(&q->bio_split);
-
-	ida_simple_remove(&blk_queue_ida, q->id);
-	call_rcu(&q->rcu_head, blk_free_queue_rcu);
-}
-
-static void blk_release_queue(struct kobject *kobj)
-{
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
-
-	INIT_WORK(&q->release_work, __blk_release_queue);
-	schedule_work(&q->release_work);
+	return entry->store(disk, page, length);
 }
 
 static const struct sysfs_ops queue_sysfs_ops = {
@@ -888,29 +864,81 @@
 	.store	= queue_attr_store,
 };
 
-struct kobj_type blk_queue_ktype = {
+static const struct attribute_group *blk_queue_attr_groups[] = {
+	&queue_attr_group,
+	&blk_mq_queue_attr_group,
+	NULL
+};
+
+static void blk_queue_release(struct kobject *kobj)
+{
+	/* nothing to do here, all data is associated with the parent gendisk */
+}
+
+const struct kobj_type blk_queue_ktype = {
+	.default_groups = blk_queue_attr_groups,
 	.sysfs_ops	= &queue_sysfs_ops,
-	.default_attrs	= default_attrs,
-	.release	= blk_release_queue,
+	.release	= blk_queue_release,
 };
 
+static void blk_debugfs_remove(struct gendisk *disk)
+{
+	struct request_queue *q = disk->queue;
+
+	mutex_lock(&q->debugfs_mutex);
+	blk_trace_shutdown(q);
+	debugfs_remove_recursive(q->debugfs_dir);
+	q->debugfs_dir = NULL;
+	q->sched_debugfs_dir = NULL;
+	q->rqos_debugfs_dir = NULL;
+	mutex_unlock(&q->debugfs_mutex);
+}
+
 /**
  * blk_register_queue - register a block layer queue with sysfs
  * @disk: Disk of which the request queue should be registered with sysfs.
  */
 int blk_register_queue(struct gendisk *disk)
 {
-	int ret;
-	struct device *dev = disk_to_dev(disk);
 	struct request_queue *q = disk->queue;
+	int ret;
 
-	if (WARN_ON(!q))
-		return -ENXIO;
+	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
+	if (ret < 0)
+		return ret;
+
+	if (queue_is_mq(q)) {
+		ret = blk_mq_sysfs_register(disk);
+		if (ret)
+			goto out_del_queue_kobj;
+	}
+	mutex_lock(&q->sysfs_lock);
+
+	mutex_lock(&q->debugfs_mutex);
+	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
+	if (queue_is_mq(q))
+		blk_mq_debugfs_register(q);
+	mutex_unlock(&q->debugfs_mutex);
+
+	ret = disk_register_independent_access_ranges(disk);
+	if (ret)
+		goto out_debugfs_remove;
+
+	ret = blk_crypto_sysfs_register(disk);
+	if (ret)
+		goto out_unregister_ia_ranges;
+
+	if (queue_is_mq(q))
+		elevator_set_default(q);
 
-	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
-		  "%s is registering an already registered queue\n",
-		  kobject_name(&dev->kobj));
 	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+	wbt_enable_default(disk);
+
+	/* Now everything is ready and send out KOBJ_ADD uevent */
+	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
+	if (q->elevator)
+		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
+	mutex_unlock(&q->sysfs_lock);
 
 	/*
 	 * SCSI probing may synchronously create and destroy a lot of
@@ -921,52 +949,22 @@ int blk_register_queue(struct gendisk *disk)
 	 * faster to shut down and is made fully functional here as
 	 * request_queues for non-existent devices never get registered.
 	 */
-	if (!blk_queue_init_done(q)) {
-		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
-		percpu_ref_switch_to_percpu(&q->q_usage_counter);
-	}
-
-	ret = blk_trace_init_sysfs(dev);
-	if (ret)
-		return ret;
-
-	/* Prevent changes through sysfs until registration is completed. */
-	mutex_lock(&q->sysfs_lock);
-
-	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-	if (ret < 0) {
-		blk_trace_remove_sysfs(dev);
-		goto unlock;
-	}
-
-	if (queue_is_mq(q)) {
-		__blk_mq_register_dev(dev, q);
-		blk_mq_debugfs_register(q);
-	}
-
-	kobject_uevent(&q->kobj, KOBJ_ADD);
+	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
+	percpu_ref_switch_to_percpu(&q->q_usage_counter);
 
-	wbt_enable_default(q);
-
-	blk_throtl_register_queue(q);
+	return ret;
 
-	if (q->elevator) {
-		ret = elv_register_queue(q);
-		if (ret) {
-			mutex_unlock(&q->sysfs_lock);
-			kobject_uevent(&q->kobj, KOBJ_REMOVE);
-			kobject_del(&q->kobj);
-			blk_trace_remove_sysfs(dev);
-			kobject_put(&dev->kobj);
-			return ret;
-		}
-	}
-	ret = 0;
-unlock:
+out_unregister_ia_ranges:
+	disk_unregister_independent_access_ranges(disk);
+out_debugfs_remove:
+	blk_debugfs_remove(disk);
 	mutex_unlock(&q->sysfs_lock);
+	if (queue_is_mq(q))
+		blk_mq_sysfs_unregister(disk);
+out_del_queue_kobj:
+	kobject_del(&disk->queue_kobj);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(blk_register_queue);
 
 /**
  * blk_unregister_queue - counterpart of blk_register_queue()
@@ -983,7 +981,7 @@ void blk_unregister_queue(struct gendisk *disk)
 		return;
 
 	/* Return early if disk->queue was never registered. */
-	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+	if (!blk_queue_registered(q))
 		return;
 
 	/*
@@ -992,25 +990,27 @@ void blk_unregister_queue(struct gendisk *disk)
 	 * concurrent elv_iosched_store() calls.
	 */
 	mutex_lock(&q->sysfs_lock);
-
 	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
+	mutex_unlock(&q->sysfs_lock);
 
 	/*
 	 * Remove the sysfs attributes before unregistering the queue data
 	 * structures that can be modified through sysfs.
 	 */
 	if (queue_is_mq(q))
-		blk_mq_unregister_dev(disk_to_dev(disk), q);
-	mutex_unlock(&q->sysfs_lock);
-
-	kobject_uevent(&q->kobj, KOBJ_REMOVE);
-	kobject_del(&q->kobj);
-	blk_trace_remove_sysfs(disk_to_dev(disk));
+		blk_mq_sysfs_unregister(disk);
+	blk_crypto_sysfs_unregister(disk);
 
 	mutex_lock(&q->sysfs_lock);
-	if (q->elevator)
-		elv_unregister_queue(q);
+	disk_unregister_independent_access_ranges(disk);
 	mutex_unlock(&q->sysfs_lock);
 
-	kobject_put(&disk_to_dev(disk)->kobj);
+	/* Now that we've deleted all child objects, we can delete the queue. */
+	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
+	kobject_del(&disk->queue_kobj);
+
+	if (queue_is_mq(q))
+		elevator_set_none(q);
+
+	blk_debugfs_remove(disk);
 }
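
The core pattern in the diff above is the new show_limit/store_limit callback pair: queue_attr_store() takes a private copy of the queue limits with queue_limits_start_update(), lets the attribute's store_limit callback validate and mutate only that copy, and then either cancels or commits the update. The following is a minimal userspace sketch of that copy-validate-commit discipline; a pthread mutex stands in for q->limits_lock, and every identifier is local to the sketch rather than a kernel API.

/* Userspace sketch of the store_limit flow: snapshot under a lock,
 * validate and mutate the copy, then commit or cancel. Build: cc -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct queue_limits {
	unsigned long max_user_discard_sectors;
	unsigned long discard_granularity;	/* power of two, as assumed below */
};

static struct queue_limits limits = {
	.max_user_discard_sectors = 1 << 16,
	.discard_granularity = 512,
};
static pthread_mutex_t limits_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like queue_limits_start_update(): take the lock, hand out a snapshot. */
static struct queue_limits limits_start_update(void)
{
	pthread_mutex_lock(&limits_lock);
	return limits;
}

/* Like queue_limits_commit_update(): publish the copy, drop the lock. */
static void limits_commit_update(const struct queue_limits *lim)
{
	limits = *lim;
	pthread_mutex_unlock(&limits_lock);
}

/* Like queue_limits_cancel_update(): discard the copy, drop the lock. */
static void limits_cancel_update(void)
{
	pthread_mutex_unlock(&limits_lock);
}

/* Shaped like queue_max_discard_sectors_store(): only the copy is touched. */
static int store_max_discard_bytes(const char *page, struct queue_limits *lim)
{
	unsigned long bytes = strtoul(page, NULL, 10);

	if (bytes & (lim->discard_granularity - 1))
		return -1;		/* -EINVAL in the kernel */
	lim->max_user_discard_sectors = bytes >> 9;
	return 0;
}

int main(void)
{
	struct queue_limits lim = limits_start_update();

	if (store_max_discard_bytes("1048576", &lim) < 0) {
		limits_cancel_update();
		return 1;
	}
	limits_commit_update(&lim);
	printf("max_user_discard_sectors = %lu\n",
	       limits.max_user_discard_sectors);
	return 0;
}

Because the lock is held from start to commit, a failed validation leaves the published limits untouched, which is why queue_attr_store() can return early through queue_limits_cancel_update() on error.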
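
queue_requests_store() above allocates grown scheduler tags with blk_mq_alloc_sched_tags() before calling blk_mq_freeze_queue(), because an allocation that sleeps while the queue is frozen could deadlock against I/O the allocator itself needs to make progress. Below is a hedged userspace analogue of that allocate/swap/free ordering; a rwlock plays the role of queue freezing, and all names are invented for the sketch.

/* Userspace sketch: allocate before "freezing", swap inside the frozen
 * section, free the old state afterwards. Build: cc -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tags {
	unsigned int depth;
	unsigned int *bitmap;	/* stand-in for real tag state */
};

static struct tags *active_tags;
static pthread_rwlock_t freeze_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct tags *tags_alloc(unsigned int depth)
{
	struct tags *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->depth = depth;
	t->bitmap = calloc(depth, sizeof(*t->bitmap));
	if (!t->bitmap) {
		free(t);
		return NULL;
	}
	return t;
}

static void tags_free(struct tags *t)
{
	if (t) {
		free(t->bitmap);
		free(t);
	}
}

static int resize_tags(unsigned int new_depth)
{
	/* 1. Allocate before "freezing" so the excluded section never
	 *    sleeps waiting for memory. */
	struct tags *fresh = tags_alloc(new_depth);
	struct tags *old;

	if (!fresh)
		return -1;

	/* 2. "Freeze": exclude all users, then swap the pointer. */
	pthread_rwlock_wrlock(&freeze_lock);
	old = active_tags;
	active_tags = fresh;
	pthread_rwlock_unlock(&freeze_lock);

	/* 3. Free the old state outside the frozen section. */
	tags_free(old);
	return 0;
}

int main(void)
{
	active_tags = tags_alloc(64);
	if (!active_tags || resize_tags(256) < 0)
		return 1;
	printf("depth = %u\n", active_tags->depth);
	tags_free(active_tags);
	return 0;
}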
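
Similarly, queue_ra_store() writes bdi->ra_pages with WRITE_ONCE() under ->limits_lock while readers remain lockless: the mutex orders writers against queue_limits_commit_update(), and the single atomic store keeps unserialized readers tear-free. In portable C11 this lock-for-writers, atomic-for-readers shape looks roughly as follows; relaxed atomics are the closest userspace stand-in for READ_ONCE/WRITE_ONCE, and the names are illustrative only.

/* Userspace sketch: serialized writers, tear-free lockless readers. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long ra_pages = 32;
static pthread_mutex_t limits_lock = PTHREAD_MUTEX_INITIALIZER;

/* Reader side: one atomic load, no lock (READ_ONCE in the kernel). */
static unsigned long ra_pages_read(void)
{
	return atomic_load_explicit(&ra_pages, memory_order_relaxed);
}

/* Writer side: the lock orders concurrent writers; the store itself is
 * atomic, so a lockless reader can never observe a torn value. */
static void ra_pages_write(unsigned long val)
{
	pthread_mutex_lock(&limits_lock);
	atomic_store_explicit(&ra_pages, val, memory_order_relaxed);
	pthread_mutex_unlock(&limits_lock);
}

int main(void)
{
	ra_pages_write(256);
	printf("ra_pages = %lu\n", ra_pages_read());
	return 0;
}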
