Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r-- | block/blk-sysfs.c | 731
1 file changed, 396 insertions(+), 335 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8c8f69d8ba48..b2b9b89d6967 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -22,14 +22,18 @@
 struct queue_sysfs_entry {
 	struct attribute attr;
-	ssize_t (*show)(struct request_queue *, char *);
-	ssize_t (*store)(struct request_queue *, const char *, size_t);
+	ssize_t (*show)(struct gendisk *disk, char *page);
+	ssize_t (*show_limit)(struct gendisk *disk, char *page);
+
+	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
+	int (*store_limit)(struct gendisk *disk, const char *page,
+			size_t count, struct queue_limits *lim);
 };
 
 static ssize_t queue_var_show(unsigned long var, char *page)
 {
-	return sprintf(page, "%lu\n", var);
+	return sysfs_emit(page, "%lu\n", var);
 }
 
 static ssize_t
@@ -47,16 +51,23 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
-static ssize_t queue_requests_show(struct request_queue *q, char *page)
+static ssize_t queue_requests_show(struct gendisk *disk, char *page)
 {
-	return queue_var_show(q->nr_requests, page);
+	ssize_t ret;
+
+	mutex_lock(&disk->queue->elevator_lock);
+	ret = queue_var_show(disk->queue->nr_requests, page);
+	mutex_unlock(&disk->queue->elevator_lock);
+	return ret;
 }
 
 static ssize_t
-queue_requests_store(struct request_queue *q, const char *page, size_t count)
+queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 {
 	unsigned long nr;
 	int ret, err;
+	unsigned int memflags;
+	struct request_queue *q = disk->queue;
 
 	if (!queue_is_mq(q))
 		return -EINVAL;
@@ -65,307 +76,294 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
+	memflags = blk_mq_freeze_queue(q);
+	mutex_lock(&q->elevator_lock);
 	if (nr < BLKDEV_MIN_RQ)
 		nr = BLKDEV_MIN_RQ;
 
-	err = blk_mq_update_nr_requests(q, nr);
+	err = blk_mq_update_nr_requests(disk->queue, nr);
 	if (err)
-		return err;
-
+		ret = err;
+	mutex_unlock(&q->elevator_lock);
+	blk_mq_unfreeze_queue(q, memflags);
 	return ret;
 }
 
-static ssize_t queue_ra_show(struct request_queue *q, char *page)
+static ssize_t queue_ra_show(struct gendisk *disk, char *page)
 {
-	unsigned long ra_kb;
+	ssize_t ret;
 
-	if (!q->disk)
-		return -EINVAL;
-	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
-	return queue_var_show(ra_kb, page);
+	mutex_lock(&disk->queue->limits_lock);
+	ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
+	mutex_unlock(&disk->queue->limits_lock);
+
+	return ret;
 }
 
 static ssize_t
-queue_ra_store(struct request_queue *q, const char *page, size_t count)
+queue_ra_store(struct gendisk *disk, const char *page, size_t count)
 {
 	unsigned long ra_kb;
 	ssize_t ret;
+	unsigned int memflags;
+	struct request_queue *q = disk->queue;
 
-	if (!q->disk)
-		return -EINVAL;
 	ret = queue_var_store(&ra_kb, page, count);
 	if (ret < 0)
 		return ret;
 
-	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
-	return ret;
-}
-
-static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-{
-	int max_sectors_kb = queue_max_sectors(q) >> 1;
-
-	return queue_var_show(max_sectors_kb, page);
-}
-
-static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_max_segments(q), page);
-}
-
-static ssize_t queue_max_discard_segments_show(struct request_queue *q,
-		char *page)
-{
-	return queue_var_show(queue_max_discard_segments(q), page);
-}
-
-static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->limits.max_integrity_segments, page);
-}
-
-static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_max_segment_size(q), page);
-}
-
-static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_logical_block_size(q), page);
-}
-
-static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_physical_block_size(q), page);
-}
+	/*
+	 * ->ra_pages is protected by ->limits_lock because it is usually
+	 * calculated from the queue limits by queue_limits_commit_update.
+	 */
+	mutex_lock(&q->limits_lock);
+	memflags = blk_mq_freeze_queue(q);
+	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+	mutex_unlock(&q->limits_lock);
+	blk_mq_unfreeze_queue(q, memflags);
 
-static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->limits.chunk_sectors, page);
+	return ret;
 }
 
-static ssize_t queue_io_min_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_io_min(q), page);
+#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
+{									\
+	return queue_var_show(disk->queue->limits._field, page);	\
+}
+
+QUEUE_SYSFS_LIMIT_SHOW(max_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
+QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
+QUEUE_SYSFS_LIMIT_SHOW(max_write_streams)
+QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
+QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
+QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
+QUEUE_SYSFS_LIMIT_SHOW(io_min)
+QUEUE_SYSFS_LIMIT_SHOW(io_opt)
+QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
+QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
+QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
+QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
+QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
+QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
+QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)
+
+#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
+{									\
+	return sysfs_emit(page, "%llu\n",				\
+		(unsigned long long)disk->queue->limits._field <<	\
+			SECTOR_SHIFT);					\
 }
 
-static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_io_opt(q), page);
-}
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)
 
-static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->limits.discard_granularity, page);
+#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
+static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
+{									\
+	return queue_var_show(disk->queue->limits._field >> 1, page);	\
 }
 
-static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
-{
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
 
-	return sprintf(page, "%llu\n",
-		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
+#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
+{									\
+	return sysfs_emit(page, "%d\n", _val);				\
 }
 
-static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
-{
-	return sprintf(page, "%llu\n",
-		(unsigned long long)q->limits.max_discard_sectors << 9);
-}
+/* deprecated fields */
+QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
+QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
+QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)
 
-static ssize_t queue_discard_max_store(struct request_queue *q,
-				       const char *page, size_t count)
+static int queue_max_discard_sectors_store(struct gendisk *disk,
+		const char *page, size_t count, struct queue_limits *lim)
 {
 	unsigned long max_discard_bytes;
-	struct queue_limits lim;
 	ssize_t ret;
-	int err;
 
 	ret = queue_var_store(&max_discard_bytes, page, count);
 	if (ret < 0)
 		return ret;
 
-	if (max_discard_bytes & (q->limits.discard_granularity - 1))
+	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
 		return -EINVAL;
 
 	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
 		return -EINVAL;
 
-	blk_mq_freeze_queue(q);
-	lim = queue_limits_start_update(q);
-	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
-	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
-
-	if (err)
-		return err;
-	return ret;
-}
-
-static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(0, page);
-}
-
-static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(0, page);
-}
-
-static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
-{
-	return sprintf(page, "%llu\n",
-		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
-}
-
-static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
-						 char *page)
-{
-	return queue_var_show(queue_zone_write_granularity(q), page);
-}
-
-static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
-{
-	unsigned long long max_sectors = q->limits.max_zone_append_sectors;
-
-	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
+	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
+	return 0;
 }
 
-static ssize_t
-queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+static int
+queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
+		struct queue_limits *lim)
 {
 	unsigned long max_sectors_kb;
-	struct queue_limits lim;
 	ssize_t ret;
-	int err;
 
 	ret = queue_var_store(&max_sectors_kb, page, count);
 	if (ret < 0)
 		return ret;
 
-	blk_mq_freeze_queue(q);
-	lim = queue_limits_start_update(q);
-	lim.max_user_sectors = max_sectors_kb << 1;
-	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
-	if (err)
-		return err;
-	return ret;
+	lim->max_user_sectors = max_sectors_kb << 1;
+	return 0;
 }
 
-static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
+		size_t count, struct queue_limits *lim, blk_features_t feature)
 {
-	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
-
-	return queue_var_show(max_hw_sectors_kb, page);
-}
+	unsigned long val;
+	ssize_t ret;
 
-static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->limits.virt_boundary_mask, page);
-}
+	ret = queue_var_store(&val, page, count);
+	if (ret < 0)
+		return ret;
 
-static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(queue_dma_alignment(q), page);
+	if (val)
+		lim->features |= feature;
+	else
+		lim->features &= ~feature;
+	return 0;
 }
 
-#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
-static ssize_t								\
-queue_##name##_show(struct request_queue *q, char *page)		\
+#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
 {									\
-	int bit;							\
-	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
-	return queue_var_show(neg ? !bit : bit, page);			\
+	return sysfs_emit(page, "%u\n",					\
+		!!(disk->queue->limits.features & _feature));		\
 }									\
-static ssize_t								\
-queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
+static int queue_##_name##_store(struct gendisk *disk,			\
+		const char *page, size_t count, struct queue_limits *lim) \
+{									\
+	return queue_feature_store(disk, page, count, lim, _feature);	\
+}
+
+QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
+QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
+QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
+QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);
+
+#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
+static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
 {									\
-	unsigned long val;						\
-	ssize_t ret;							\
-	ret = queue_var_store(&val, page, count);			\
-	if (ret < 0)							\
-		return ret;						\
-	if (neg)							\
-		val = !val;						\
-									\
-	if (val)							\
-		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
-	else								\
-		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
-	return ret;							\
+	return sysfs_emit(page, "%u\n",					\
+		!!(disk->queue->limits.features & _feature));		\
 }
 
-QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
-QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
-QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
-#undef QUEUE_SYSFS_BIT_FNS
+QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
+QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);
+
+static ssize_t queue_poll_show(struct gendisk *disk, char *page)
+{
+	if (queue_is_mq(disk->queue))
+		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
+
+	return sysfs_emit(page, "%u\n",
+		!!(disk->queue->limits.features & BLK_FEAT_POLL));
+}
 
-static ssize_t queue_zoned_show(struct request_queue *q, char *page)
+static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
 {
-	if (blk_queue_is_zoned(q))
-		return sprintf(page, "host-managed\n");
-	return sprintf(page, "none\n");
+	if (blk_queue_is_zoned(disk->queue))
+		return sysfs_emit(page, "host-managed\n");
+	return sysfs_emit(page, "none\n");
}
 
-static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
+static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
 {
-	return queue_var_show(disk_nr_zones(q->disk), page);
+	return queue_var_show(disk_nr_zones(disk), page);
 }
 
-static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
+static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
 {
-	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
+	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
 }
 
-static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
+static int queue_iostats_passthrough_store(struct gendisk *disk,
+		const char *page, size_t count, struct queue_limits *lim)
 {
-	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
+	unsigned long ios;
+	ssize_t ret;
+
+	ret = queue_var_store(&ios, page, count);
+	if (ret < 0)
+		return ret;
+
+	if (ios)
+		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
+	else
+		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
+	return 0;
 }
 
-static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
 {
-	return queue_var_show((blk_queue_nomerges(q) << 1) |
-			       blk_queue_noxmerges(q), page);
+	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
+			       blk_queue_noxmerges(disk->queue), page);
 }
 
-static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
 				    size_t count)
 {
 	unsigned long nm;
+	unsigned int memflags;
+	struct request_queue *q = disk->queue;
 	ssize_t ret = queue_var_store(&nm, page, count);
 
 	if (ret < 0)
 		return ret;
 
+	memflags = blk_mq_freeze_queue(q);
 	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
 	if (nm == 2)
 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else if (nm)
 		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+	blk_mq_unfreeze_queue(q, memflags);
 
 	return ret;
 }
 
-static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
 {
-	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
-	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
+	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
+	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);
 
 	return queue_var_show(set << force, page);
 }
 
 static ssize_t
-queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
 {
 	ssize_t ret = -EINVAL;
 #ifdef CONFIG_SMP
+	struct request_queue *q = disk->queue;
 	unsigned long val;
+	unsigned int memflags;
 
 	ret = queue_var_store(&val, page, count);
 	if (ret < 0)
 		return ret;
 
+	/*
+	 * Here we update two queue flags each using atomic bitops, although
+	 * updating two flags isn't atomic it should be harmless as those flags
+	 * are accessed individually using atomic test_bit operation. So we
+	 * don't grab any lock while updating these flags.
+	 */
+	memflags = blk_mq_freeze_queue(q);
 	if (val == 2) {
 		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
@@ -376,89 +374,87 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
 		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
 	}
+	blk_mq_unfreeze_queue(q, memflags);
 #endif
 	return ret;
 }
 
-static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
-{
-	return sprintf(page, "%d\n", -1);
-}
-
-static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
+static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
 				size_t count)
 {
 	return count;
 }
 
-static ssize_t queue_poll_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
-}
-
-static ssize_t queue_poll_store(struct request_queue *q, const char *page,
+static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
 				size_t count)
 {
-	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
-		return -EINVAL;
+	unsigned int memflags;
+	ssize_t ret = count;
+	struct request_queue *q = disk->queue;
+
+	memflags = blk_mq_freeze_queue(q);
+	if (!(q->limits.features & BLK_FEAT_POLL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
 	pr_info_ratelimited("please use driver specific parameters instead.\n");
-	return count;
+out:
+	blk_mq_unfreeze_queue(q, memflags);
+	return ret;
 }
 
-static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
+static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
 {
-	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
+	return sysfs_emit(page, "%u\n",
+			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
 }
 
-static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
+static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
 				  size_t count)
 {
-	unsigned int val;
+	unsigned int val, memflags;
 	int err;
+	struct request_queue *q = disk->queue;
 
 	err = kstrtou32(page, 10, &val);
 	if (err || val == 0)
 		return -EINVAL;
 
+	memflags = blk_mq_freeze_queue(q);
 	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
+	blk_mq_unfreeze_queue(q, memflags);
 
 	return count;
 }
 
-static ssize_t queue_wc_show(struct request_queue *q, char *page)
+static ssize_t queue_wc_show(struct gendisk *disk, char *page)
 {
-	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-		return sprintf(page, "write back\n");
-
-	return sprintf(page, "write through\n");
+	if (blk_queue_write_cache(disk->queue))
+		return sysfs_emit(page, "write back\n");
+	return sysfs_emit(page, "write through\n");
 }
 
-static ssize_t queue_wc_store(struct request_queue *q, const char *page,
-			      size_t count)
+static int queue_wc_store(struct gendisk *disk, const char *page,
+		size_t count, struct queue_limits *lim)
 {
+	bool disable;
+
 	if (!strncmp(page, "write back", 10)) {
-		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
-			return -EINVAL;
-		blk_queue_flag_set(QUEUE_FLAG_WC, q);
+		disable = false;
 	} else if (!strncmp(page, "write through", 13) ||
-		 !strncmp(page, "none", 4)) {
-		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
+		   !strncmp(page, "none", 4)) {
+		disable = true;
 	} else {
 		return -EINVAL;
 	}
 
-	return count;
-}
-
-static ssize_t queue_fua_show(struct request_queue *q, char *page)
-{
-	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
-}
-
-static ssize_t queue_dax_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_dax(q), page);
+	if (disable)
+		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
+	else
+		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
+	return 0;
 }
 
 #define QUEUE_RO_ENTRY(_prefix, _name)					\
@@ -474,62 +470,80 @@ static struct queue_sysfs_entry _prefix##_entry = {	\
 	.store	= _prefix##_store,					\
 };
 
+#define QUEUE_LIM_RO_ENTRY(_prefix, _name)				\
+static struct queue_sysfs_entry _prefix##_entry = {			\
+	.attr		= { .name = _name, .mode = 0444 },		\
+	.show_limit	= _prefix##_show,				\
+}
+
+#define QUEUE_LIM_RW_ENTRY(_prefix, _name)				\
+static struct queue_sysfs_entry _prefix##_entry = {			\
+	.attr		= { .name = _name, .mode = 0644 },		\
+	.show_limit	= _prefix##_show,				\
+	.store_limit	= _prefix##_store,				\
+}
+
 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
-QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
-QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
-QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
-QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
-QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
+QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
+QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
+QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
+QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
+QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
+QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams");
+QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity");
 QUEUE_RW_ENTRY(elv_iosched, "scheduler");
 
-QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
-QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
-QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
-QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
-QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
+QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
+QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
+QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
+QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
+QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");
 
-QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
-QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
-QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
-QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
+QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
+QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
+QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
 QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
 
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
+		"atomic_write_boundary_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
+
 QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
-QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
-QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
-QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
+QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
 
-QUEUE_RO_ENTRY(queue_zoned, "zoned");
+QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
 QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
-QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
-QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
+QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
+QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");
 
 QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
 QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
 QUEUE_RW_ENTRY(queue_poll, "io_poll");
 QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
-QUEUE_RW_ENTRY(queue_wc, "write_cache");
-QUEUE_RO_ENTRY(queue_fua, "fua");
-QUEUE_RO_ENTRY(queue_dax, "dax");
+QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
+QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
+QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
 QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
-QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
-QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
-
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
-#endif
+QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
+QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");
 
 /* legacy alias for logical_block_size: */
 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
-	.attr = {.name = "hw_sector_size", .mode = 0444 },
-	.show = queue_logical_block_size_show,
+	.attr		= {.name = "hw_sector_size", .mode = 0444 },
+	.show_limit	= queue_logical_block_size_show,
 };
 
-QUEUE_RW_ENTRY(queue_nonrot, "rotational");
-QUEUE_RW_ENTRY(queue_iostats, "iostats");
-QUEUE_RW_ENTRY(queue_random, "add_random");
-QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
+QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
+QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
+QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
+QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");
 
 #ifdef CONFIG_BLK_WBT
 static ssize_t queue_var_store64(s64 *var, const char *page)
@@ -545,23 +559,36 @@ static ssize_t queue_var_store64(s64 *var, const char *page)
 	return 0;
 }
 
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
 {
-	if (!wbt_rq_qos(q))
-		return -EINVAL;
+	ssize_t ret;
+	struct request_queue *q = disk->queue;
 
-	if (wbt_disabled(q))
-		return sprintf(page, "0\n");
+	mutex_lock(&disk->rqos_state_mutex);
+	if (!wbt_rq_qos(q)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (wbt_disabled(q)) {
+		ret = sysfs_emit(page, "0\n");
+		goto out;
+	}
 
-	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+	ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+out:
+	mutex_unlock(&disk->rqos_state_mutex);
+	return ret;
 }
 
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
 				  size_t count)
 {
+	struct request_queue *q = disk->queue;
 	struct rq_qos *rqos;
 	ssize_t ret;
 	s64 val;
+	unsigned int memflags;
 
 	ret = queue_var_store64(&val, page);
 	if (ret < 0)
@@ -569,35 +596,40 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	if (val < -1)
 		return -EINVAL;
 
+	memflags = blk_mq_freeze_queue(q);
+
 	rqos = wbt_rq_qos(q);
 	if (!rqos) {
-		ret = wbt_init(q->disk);
+		ret = wbt_init(disk);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
+	ret = count;
 	if (val == -1)
 		val = wbt_default_latency_nsec(q);
 	else if (val >= 0)
 		val *= 1000ULL;
 
 	if (wbt_get_min_lat(q) == val)
-		return count;
+		goto out;
 
 	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
-	blk_mq_freeze_queue(q);
 	blk_mq_quiesce_queue(q);
 
+	mutex_lock(&disk->rqos_state_mutex);
 	wbt_set_min_lat(q, val);
+	mutex_unlock(&disk->rqos_state_mutex);
 
 	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
 
+out:
+	blk_mq_unfreeze_queue(q, memflags);
 
-	return count;
+	return ret;
 }
 
 QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
@@ -605,13 +637,17 @@ QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
 
 /* Common attributes for bio-based and request-based queues. */
 static struct attribute *queue_attrs[] = {
-	&queue_ra_entry.attr,
+	/*
	 * Attributes which are protected with q->limits_lock.
	 */
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
 	&queue_max_discard_segments_entry.attr,
 	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
+	&queue_max_write_streams_entry.attr,
+	&queue_write_stream_granularity_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
 	&queue_physical_block_size_entry.attr,
@@ -619,44 +655,60 @@ static struct attribute *queue_attrs[] = {
 	&queue_io_min_entry.attr,
 	&queue_io_opt_entry.attr,
 	&queue_discard_granularity_entry.attr,
-	&queue_discard_max_entry.attr,
-	&queue_discard_max_hw_entry.attr,
-	&queue_discard_zeroes_data_entry.attr,
-	&queue_write_same_max_entry.attr,
-	&queue_write_zeroes_max_entry.attr,
-	&queue_zone_append_max_entry.attr,
+	&queue_max_discard_sectors_entry.attr,
+	&queue_max_hw_discard_sectors_entry.attr,
+	&queue_atomic_write_max_sectors_entry.attr,
+	&queue_atomic_write_boundary_sectors_entry.attr,
+	&queue_atomic_write_unit_min_entry.attr,
+	&queue_atomic_write_unit_max_entry.attr,
+	&queue_max_write_zeroes_sectors_entry.attr,
+	&queue_max_zone_append_sectors_entry.attr,
 	&queue_zone_write_granularity_entry.attr,
-	&queue_nonrot_entry.attr,
+	&queue_rotational_entry.attr,
 	&queue_zoned_entry.attr,
-	&queue_nr_zones_entry.attr,
 	&queue_max_open_zones_entry.attr,
 	&queue_max_active_zones_entry.attr,
-	&queue_nomerges_entry.attr,
+	&queue_iostats_passthrough_entry.attr,
 	&queue_iostats_entry.attr,
 	&queue_stable_writes_entry.attr,
-	&queue_random_entry.attr,
-	&queue_poll_entry.attr,
+	&queue_add_random_entry.attr,
 	&queue_wc_entry.attr,
 	&queue_fua_entry.attr,
 	&queue_dax_entry.attr,
-	&queue_poll_delay_entry.attr,
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	&blk_throtl_sample_time_entry.attr,
-#endif
 	&queue_virt_boundary_mask_entry.attr,
 	&queue_dma_alignment_entry.attr,
+	&queue_ra_entry.attr,
+
+	/*
	 * Attributes which don't require locking.
	 */
+	&queue_discard_zeroes_data_entry.attr,
+	&queue_write_same_max_entry.attr,
+	&queue_nr_zones_entry.attr,
+	&queue_nomerges_entry.attr,
+	&queue_poll_entry.attr,
+	&queue_poll_delay_entry.attr,
+
 	NULL,
 };
 
 /* Request-based queue attributes that are not relevant for bio-based queues. */
 static struct attribute *blk_mq_queue_attrs[] = {
-	&queue_requests_entry.attr,
+	/*
	 * Attributes which require some form of locking other than
	 * q->sysfs_lock.
	 */
 	&elv_iosched_entry.attr,
-	&queue_rq_affinity_entry.attr,
-	&queue_io_timeout_entry.attr,
+	&queue_requests_entry.attr,
 #ifdef CONFIG_BLK_WBT
 	&queue_wb_lat_entry.attr,
 #endif
+	/*
	 * Attributes which don't require locking.
	 */
+	&queue_rq_affinity_entry.attr,
+	&queue_io_timeout_entry.attr,
+
 	NULL,
 };
 
@@ -706,15 +758,20 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
-	struct request_queue *q = disk->queue;
-	ssize_t res;
 
-	if (!entry->show)
+	if (!entry->show && !entry->show_limit)
 		return -EIO;
-	mutex_lock(&q->sysfs_lock);
-	res = entry->show(q, page);
-	mutex_unlock(&q->sysfs_lock);
-	return res;
+
+	if (entry->show_limit) {
+		ssize_t res;
+
+		mutex_lock(&disk->queue->limits_lock);
+		res = entry->show_limit(disk, page);
+		mutex_unlock(&disk->queue->limits_lock);
+		return res;
+	}
+
+	return entry->show(disk, page);
 }
 
 static ssize_t
@@ -724,15 +781,28 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	struct queue_sysfs_entry *entry = to_queue(attr);
 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 	struct request_queue *q = disk->queue;
-	ssize_t res;
 
-	if (!entry->store)
+	if (!entry->store_limit && !entry->store)
 		return -EIO;
 
-	mutex_lock(&q->sysfs_lock);
-	res = entry->store(q, page, length);
-	mutex_unlock(&q->sysfs_lock);
-	return res;
+	if (entry->store_limit) {
+		ssize_t res;
+
+		struct queue_limits lim = queue_limits_start_update(q);
+
+		res = entry->store_limit(disk, page, length, &lim);
+		if (res < 0) {
+			queue_limits_cancel_update(q);
+			return res;
+		}
+
+		res = queue_limits_commit_update_frozen(q, &lim);
+		if (res)
+			return res;
+		return length;
+	}
+
+	return entry->store(disk, page, length);
 }
 
 static const struct sysfs_ops queue_sysfs_ops = {
@@ -779,7 +849,6 @@ int blk_register_queue(struct gendisk *disk)
 	struct request_queue *q = disk->queue;
 	int ret;
 
-	mutex_lock(&q->sysfs_dir_lock);
 	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
 	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
 	if (ret < 0)
@@ -802,26 +871,21 @@ int blk_register_queue(struct gendisk *disk)
 	if (ret)
 		goto out_debugfs_remove;
 
-	if (q->elevator) {
-		ret = elv_register_queue(q, false);
-		if (ret)
-			goto out_unregister_ia_ranges;
-	}
-
 	ret = blk_crypto_sysfs_register(disk);
 	if (ret)
-		goto out_elv_unregister;
+		goto out_unregister_ia_ranges;
 
-	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+	if (queue_is_mq(q))
+		elevator_set_default(q);
 	wbt_enable_default(disk);
-	blk_throtl_register(disk);
+
+	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
 
 	/* Now everything is ready and send out KOBJ_ADD uevent */
 	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
 	if (q->elevator)
 		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
 	mutex_unlock(&q->sysfs_lock);
-	mutex_unlock(&q->sysfs_dir_lock);
 
 	/*
	 * SCSI probing may synchronously create and destroy a lot of
@@ -832,23 +896,20 @@ int blk_register_queue(struct gendisk *disk)
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
-	if (!blk_queue_init_done(q)) {
-		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
-		percpu_ref_switch_to_percpu(&q->q_usage_counter);
-	}
+	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
+	percpu_ref_switch_to_percpu(&q->q_usage_counter);
 
 	return ret;
 
-out_elv_unregister:
-	elv_unregister_queue(q);
 out_unregister_ia_ranges:
 	disk_unregister_independent_access_ranges(disk);
 out_debugfs_remove:
 	blk_debugfs_remove(disk);
 	mutex_unlock(&q->sysfs_lock);
+	if (queue_is_mq(q))
+		blk_mq_sysfs_unregister(disk);
 out_put_queue_kobj:
 	kobject_put(&disk->queue_kobj);
-	mutex_unlock(&q->sysfs_dir_lock);
 	return ret;
 }
 
@@ -879,7 +940,6 @@ void blk_unregister_queue(struct gendisk *disk)
 	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
 	mutex_unlock(&q->sysfs_lock);
 
-	mutex_lock(&q->sysfs_dir_lock);
 	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
@@ -889,14 +949,15 @@ void blk_unregister_queue(struct gendisk *disk)
 	blk_crypto_sysfs_unregister(disk);
 
 	mutex_lock(&q->sysfs_lock);
-	elv_unregister_queue(q);
 	disk_unregister_independent_access_ranges(disk);
 	mutex_unlock(&q->sysfs_lock);
 
 	/* Now that we've deleted all child objects, we can delete the queue. */
 	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
 	kobject_del(&disk->queue_kobj);
-	mutex_unlock(&q->sysfs_dir_lock);
+
+	if (queue_is_mq(q))
+		elevator_set_none(q);
 
 	blk_debugfs_remove(disk);
 }
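
For orientation, the pattern this diff converges on is that a writable limit attribute no longer freezes the queue or commits limits itself: its store_limit() callback only validates the input and fills in the staged queue_limits, and queue_attr_store() brackets the call with queue_limits_start_update() and queue_limits_commit_update_frozen(). A minimal sketch of how a new attribute would slot into this scheme follows; the max_foo_sectors limit field is hypothetical, while the macros and helpers are the ones introduced above.

/* Hypothetical: assumes a max_foo_sectors field exists in struct queue_limits. */
QUEUE_SYSFS_LIMIT_SHOW(max_foo_sectors)

static int queue_max_foo_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	/*
	 * Only stage the new value here.  queue_attr_store() commits it
	 * via queue_limits_commit_update_frozen(), which validates the
	 * full limits set and freezes the queue around the update.
	 */
	lim->max_foo_sectors = val;
	return 0;
}

QUEUE_LIM_RW_ENTRY(queue_max_foo_sectors, "max_foo_sectors");

Returning 0 rather than count is the contract that separates store_limit() from store(): on a successful commit queue_attr_store() itself returns the write length, and on a failed commit the staged update is discarded without the attribute code having to unfreeze or unlock anything.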