Diffstat (limited to 'drivers/scsi/sd.c')
-rw-r--r-- | drivers/scsi/sd.c | 376
1 files changed, 232 insertions, 144 deletions
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fe82baa924f8..8bb3a3611851 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -33,11 +33,12 @@
  * than the level indicated above to trigger output.
  */
 
+#include <linux/bio-integrity.h>
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/bio.h>
+#include <linux/bio-integrity.h>
 #include <linux/hdreg.h>
 #include <linux/errno.h>
 #include <linux/idr.h>
@@ -102,12 +103,13 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
 
 #define SD_MINORS	16
 
-static void sd_config_discard(struct scsi_disk *, unsigned int);
-static void sd_config_write_same(struct scsi_disk *);
+static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
+		unsigned int mode);
+static void sd_config_write_same(struct scsi_disk *sdkp,
+		struct queue_limits *lim);
 static int sd_revalidate_disk(struct gendisk *);
 static void sd_unlock_native_capacity(struct gendisk *disk);
 static void sd_shutdown(struct device *);
-static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
 static void scsi_disk_release(struct device *cdev);
 
 static DEFINE_IDA(sd_index_ida);
@@ -120,17 +122,18 @@ static const char *sd_cache_types[] = {
 	"write back, no read (daft)"
 };
 
-static void sd_set_flush_flag(struct scsi_disk *sdkp)
+static void sd_set_flush_flag(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
-	bool wc = false, fua = false;
-
 	if (sdkp->WCE) {
-		wc = true;
+		lim->features |= BLK_FEAT_WRITE_CACHE;
 		if (sdkp->DPOFUA)
-			fua = true;
+			lim->features |= BLK_FEAT_FUA;
+		else
+			lim->features &= ~BLK_FEAT_FUA;
+	} else {
+		lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
 	}
-
-	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
 }
 
 static ssize_t
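The conversion pattern used throughout this patch: sd helpers stop calling blk_queue_* setters on the live queue and instead flip feature bits in a caller-supplied queue_limits, which the caller commits in one shot. A minimal userspace model of the new sd_set_flush_flag() logic — the BLK_FEAT_* bit values below are illustrative placeholders, not the kernel's real definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's BLK_FEAT_* bits (values assumed). */
#define BLK_FEAT_WRITE_CACHE (1u << 0)
#define BLK_FEAT_FUA         (1u << 1)

/* Mirrors the post-patch sd_set_flush_flag(): mutate a features word
 * instead of calling blk_queue_write_cache() on the live queue. */
static unsigned int set_flush_flags(unsigned int features, bool wce, bool dpofua)
{
	if (wce) {
		features |= BLK_FEAT_WRITE_CACHE;
		if (dpofua)
			features |= BLK_FEAT_FUA;
		else
			features &= ~BLK_FEAT_FUA;
	} else {
		features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
	}
	return features;
}

int main(void)
{
	printf("WCE=1 DPOFUA=1 -> 0x%x\n", set_flush_flags(0, true, true));
	printf("WCE=1 DPOFUA=0 -> 0x%x\n", set_flush_flags(0, true, false));
	printf("WCE=0          -> 0x%x\n", set_flush_flags(~0u, false, false) &
	       (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA));
	return 0;
}

Because the helper only edits a local copy, a failed commit leaves the queue's previous limits fully intact — that is the point of the start_update/commit_update pairing seen in the hunks that follow.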
@@ -168,9 +171,18 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
 	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
 
 	if (sdkp->cache_override) {
+		struct queue_limits lim;
+
 		sdkp->WCE = wce;
 		sdkp->RCD = rcd;
-		sd_set_flush_flag(sdkp);
+
+		lim = queue_limits_start_update(sdkp->disk->queue);
+		sd_set_flush_flag(sdkp, &lim);
+		blk_mq_freeze_queue(sdkp->disk->queue);
+		ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
+		blk_mq_unfreeze_queue(sdkp->disk->queue);
+		if (ret)
+			return ret;
 		return count;
 	}
 
@@ -457,16 +469,12 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
-	int mode;
+	struct queue_limits lim;
+	int mode, err;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	if (sd_is_zoned(sdkp)) {
-		sd_config_discard(sdkp, SD_LBP_DISABLE);
-		return count;
-	}
-
 	if (sdp->type != TYPE_DISK)
 		return -EINVAL;
 
@@ -474,8 +482,13 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
 	if (mode < 0)
 		return -EINVAL;
 
-	sd_config_discard(sdkp, mode);
-
+	lim = queue_limits_start_update(sdkp->disk->queue);
+	sd_config_discard(sdkp, &lim, mode);
+	blk_mq_freeze_queue(sdkp->disk->queue);
+	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+	blk_mq_unfreeze_queue(sdkp->disk->queue);
+	if (err)
+		return err;
 	return count;
 }
 static DEVICE_ATTR_RW(provisioning_mode);
@@ -558,6 +571,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
+	struct queue_limits lim;
 	unsigned long max;
 	int err;
 
@@ -579,8 +593,13 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
 		sdkp->max_ws_blocks = max;
 	}
 
-	sd_config_write_same(sdkp);
-
+	lim = queue_limits_start_update(sdkp->disk->queue);
+	sd_config_write_same(sdkp, &lim);
+	blk_mq_freeze_queue(sdkp->disk->queue);
+	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+	blk_mq_unfreeze_queue(sdkp->disk->queue);
+	if (err)
+		return err;
 	return count;
 }
 static DEVICE_ATTR_RW(max_write_same_blocks);
@@ -823,25 +842,28 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
 	return protect;
 }
 
-static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+static void sd_disable_discard(struct scsi_disk *sdkp)
+{
+	sdkp->provisioning_mode = SD_LBP_DISABLE;
+	blk_queue_disable_discard(sdkp->disk->queue);
+}
+
+static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
+		unsigned int mode)
 {
-	struct request_queue *q = sdkp->disk->queue;
 	unsigned int logical_block_size = sdkp->device->sector_size;
 	unsigned int max_blocks = 0;
 
-	q->limits.discard_alignment =
-		sdkp->unmap_alignment * logical_block_size;
-	q->limits.discard_granularity =
-		max(sdkp->physical_block_size,
-		    sdkp->unmap_granularity * logical_block_size);
+	lim->discard_alignment = sdkp->unmap_alignment * logical_block_size;
+	lim->discard_granularity = max(sdkp->physical_block_size,
+			sdkp->unmap_granularity * logical_block_size);
 	sdkp->provisioning_mode = mode;
 
 	switch (mode) {
 
 	case SD_LBP_FULL:
 	case SD_LBP_DISABLE:
-		blk_queue_max_discard_sectors(q, 0);
-		return;
+		break;
 
 	case SD_LBP_UNMAP:
 		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
@@ -872,7 +894,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 		break;
 	}
 
-	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
+	lim->max_hw_discard_sectors = max_blocks *
+		(logical_block_size >> SECTOR_SHIFT);
 }
 
 static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
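sd_config_discard() now only fills in the limits structure; the caller commits it. The arithmetic itself is unchanged by the patch. A standalone sketch of that arithmetic under made-up sample values (max_u32() is a local helper standing in for the kernel's max() macro):

#include <stdio.h>

#define SECTOR_SHIFT 9

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* Sample values: 4K-physical drive, 512-byte logical blocks,
	 * UNMAP granularity of 8 blocks, 65535 max unmap blocks. */
	unsigned int logical_block_size = 512;
	unsigned int physical_block_size = 4096;
	unsigned int unmap_granularity = 8;
	unsigned int unmap_alignment = 0;
	unsigned int max_blocks = 65535;

	unsigned int discard_alignment = unmap_alignment * logical_block_size;
	unsigned int discard_granularity = max_u32(physical_block_size,
			unmap_granularity * logical_block_size);
	unsigned int max_hw_discard_sectors = max_blocks *
			(logical_block_size >> SECTOR_SHIFT);

	printf("discard_alignment      = %u bytes\n", discard_alignment);
	printf("discard_granularity    = %u bytes\n", discard_granularity);
	printf("max_hw_discard_sectors = %u\n", max_hw_discard_sectors);
	return 0;
}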
@@ -918,6 +941,64 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
 	return scsi_alloc_sgtables(cmd);
 }
 
+static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
+{
+	unsigned int logical_block_size = sdkp->device->sector_size,
+		physical_block_size_sectors, max_atomic, unit_min, unit_max;
+
+	if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) ||
+	    sdkp->protection_type == T10_PI_TYPE2_PROTECTION)
+		return;
+
+	physical_block_size_sectors = sdkp->physical_block_size /
+		sdkp->device->sector_size;
+
+	unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ?
+					sdkp->atomic_granularity :
+					physical_block_size_sectors);
+
+	/*
+	 * Only use atomic boundary when we have the odd scenario of
+	 * sdkp->max_atomic == 0, which the spec does permit.
+	 */
+	if (sdkp->max_atomic) {
+		max_atomic = sdkp->max_atomic;
+		unit_max = rounddown_pow_of_two(sdkp->max_atomic);
+		sdkp->use_atomic_write_boundary = 0;
+	} else {
+		max_atomic = sdkp->max_atomic_with_boundary;
+		unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary);
+		sdkp->use_atomic_write_boundary = 1;
+	}
+
+	/*
+	 * Ensure compliance with granularity and alignment. For now, keep it
+	 * simple and just don't support atomic writes for values mismatched
+	 * with max_{boundary}atomic, physical block size, and
+	 * atomic_granularity itself.
+	 *
+	 * We're really being distrustful by checking unit_max also...
+	 */
+	if (sdkp->atomic_granularity > 1) {
+		if (unit_min > 1 && unit_min % sdkp->atomic_granularity)
+			return;
+		if (unit_max > 1 && unit_max % sdkp->atomic_granularity)
+			return;
+	}
+
+	if (sdkp->atomic_alignment > 1) {
+		if (unit_min > 1 && unit_min % sdkp->atomic_alignment)
+			return;
+		if (unit_max > 1 && unit_max % sdkp->atomic_alignment)
+			return;
+	}
+
+	lim->atomic_write_hw_max = max_atomic * logical_block_size;
+	lim->atomic_write_hw_boundary = 0;
+	lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
+	lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
+}
+
 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
 					       bool unmap)
 {
@@ -1000,9 +1081,16 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
 		return sd_setup_write_same10_cmnd(cmd, false);
 }
 
-static void sd_config_write_same(struct scsi_disk *sdkp)
+static void sd_disable_write_same(struct scsi_disk *sdkp)
+{
+	sdkp->device->no_write_same = 1;
+	sdkp->max_ws_blocks = 0;
+	blk_queue_disable_write_zeroes(sdkp->disk->queue);
+}
+
+static void sd_config_write_same(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
-	struct request_queue *q = sdkp->disk->queue;
 	unsigned int logical_block_size = sdkp->device->sector_size;
 
 	if (sdkp->device->no_write_same) {
@@ -1056,8 +1144,8 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
 	}
 
 out:
-	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
-					   (logical_block_size >> 9));
+	lim->max_write_zeroes_sectors =
+		sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT);
 }
 
 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
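sd_config_atomic() derives power-of-two atomic write units from the VPD-reported values and bails out entirely on any granularity or alignment mismatch, rather than advertising unusable limits. A runnable illustration of that derivation, using hypothetical VPD numbers and a userspace stand-in for the kernel's rounddown_pow_of_two():

#include <stdio.h>

/* Userspace stand-in for the kernel's rounddown_pow_of_two(); n must be > 0. */
static unsigned int rounddown_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p <= n / 2)
		p *= 2;
	return p;
}

int main(void)
{
	/* Hypothetical VPD-derived values, in logical blocks. */
	unsigned int max_atomic = 96;        /* MAXIMUM ATOMIC TRANSFER LENGTH */
	unsigned int atomic_granularity = 8; /* ATOMIC GRANULARITY */

	unsigned int unit_min = rounddown_pow2(atomic_granularity); /* 8 */
	unsigned int unit_max = rounddown_pow2(max_atomic);         /* 64 */

	printf("unit_min = %u, unit_max = %u\n", unit_min, unit_max);

	/* Mirror of the patch's rejection rule: any mismatch disables the
	 * whole feature. Here 8 %% 8 == 0 and 64 %% 8 == 0, so it stays on. */
	if (atomic_granularity > 1 &&
	    ((unit_min > 1 && unit_min % atomic_granularity) ||
	     (unit_max > 1 && unit_max % atomic_granularity)))
		printf("atomic writes disabled\n");
	else
		printf("atomic write units: %u..%u blocks\n", unit_min, unit_max);
	return 0;
}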
@@ -1209,6 +1297,26 @@ static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd)
 	return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1;
 }
 
+static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd,
+					sector_t lba, unsigned int nr_blocks,
+					bool boundary, unsigned char flags)
+{
+	cmd->cmd_len = 16;
+	cmd->cmnd[0] = WRITE_ATOMIC_16;
+	cmd->cmnd[1] = flags;
+	put_unaligned_be64(lba, &cmd->cmnd[2]);
+	put_unaligned_be16(nr_blocks, &cmd->cmnd[12]);
+	if (boundary)
+		put_unaligned_be16(nr_blocks, &cmd->cmnd[10]);
+	else
+		put_unaligned_be16(0, &cmd->cmnd[10]);
+	put_unaligned_be16(nr_blocks, &cmd->cmnd[12]);
+	cmd->cmnd[14] = 0;
+	cmd->cmnd[15] = 0;
+
+	return BLK_STS_OK;
+}
+
 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
 {
 	struct request *rq = scsi_cmd_to_rq(cmd);
@@ -1274,6 +1382,10 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
 	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
 		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
 					 protect | fua, dld);
+	} else if (rq->cmd_flags & REQ_ATOMIC && write) {
+		ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks,
+				sdkp->use_atomic_write_boundary,
+				protect | fua);
 	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
 		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
 					 protect | fua, dld);
@@ -2247,15 +2359,14 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 			case 0x24:	/* INVALID FIELD IN CDB */
 				switch (SCpnt->cmnd[0]) {
 				case UNMAP:
-					sd_config_discard(sdkp, SD_LBP_DISABLE);
+					sd_disable_discard(sdkp);
 					break;
 				case WRITE_SAME_16:
 				case WRITE_SAME:
 					if (SCpnt->cmnd[1] & 8) { /* UNMAP */
-						sd_config_discard(sdkp, SD_LBP_DISABLE);
+						sd_disable_discard(sdkp);
 					} else {
-						sdkp->device->no_write_same = 1;
-						sd_config_write_same(sdkp);
+						sd_disable_write_same(sdkp);
 						req->rq_flags |= RQF_QUIET;
 					}
 					break;
@@ -2267,7 +2378,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	}
 
  out:
-	if (sd_is_zoned(sdkp))
+	if (sdkp->device->type == TYPE_ZBC)
 		good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
 
 	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
@@ -2461,11 +2572,13 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
 	return 0;
 }
 
-static void sd_config_protection(struct scsi_disk *sdkp)
+static void sd_config_protection(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
 	struct scsi_device *sdp = sdkp->device;
 
-	sd_dif_config_host(sdkp);
+	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+		sd_dif_config_host(sdkp, lim);
 
 	if (!sdkp->protection_type)
 		return;
@@ -2514,7 +2627,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
 #define READ_CAPACITY_RETRIES_ON_RESET	10
 
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
-						unsigned char *buffer)
+		struct queue_limits *lim, unsigned char *buffer)
 {
 	unsigned char cmd[16];
 	struct scsi_sense_hdr sshdr;
@@ -2588,7 +2701,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 
 	/* Lowest aligned logical block */
 	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
-	blk_queue_alignment_offset(sdp->request_queue, alignment);
+	lim->alignment_offset = alignment;
 	if (alignment && sdkp->first_scan)
 		sd_printk(KERN_NOTICE, sdkp,
 			  "physical block alignment offset: %u\n", alignment);
@@ -2599,7 +2712,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		if (buffer[14] & 0x40) /* LBPRZ */
 			sdkp->lbprz = 1;
 
-		sd_config_discard(sdkp, SD_LBP_WS16);
+		sd_config_discard(sdkp, lim, SD_LBP_WS16);
 	}
 
 	sdkp->capacity = lba + 1;
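For reference, the WRITE ATOMIC(16) CDB that sd_setup_atomic_cmnd() builds: opcode 0x9c (per SBC-4), flags in byte 1, the LBA in bytes 2-9, the atomic boundary in bytes 10-11, and the transfer length in bytes 12-13. A self-contained demo of that layout, with local big-endian helpers standing in for the kernel's put_unaligned_be64()/put_unaligned_be16():

#include <stdint.h>
#include <stdio.h>

/* Local big-endian packers standing in for put_unaligned_be64/be16. */
static void put_be64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (56 - 8 * i);
}

static void put_be16(uint16_t v, uint8_t *p)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

int main(void)
{
	uint8_t cdb[16] = { 0 };
	uint64_t lba = 0x12345678;	/* arbitrary example LBA */
	uint16_t nr_blocks = 64;	/* transfer length in blocks */

	cdb[0] = 0x9c;			/* WRITE ATOMIC(16), per SBC-4 */
	cdb[1] = 0;			/* protection/DPO/FUA flags */
	put_be64(lba, &cdb[2]);		/* bytes 2..9: LBA */
	put_be16(0, &cdb[10]);		/* bytes 10..11: atomic boundary (none) */
	put_be16(nr_blocks, &cdb[12]);	/* bytes 12..13: transfer length */

	for (int i = 0; i < 16; i++)
		printf("%02x%c", cdb[i], i == 15 ? '\n' : ' ');
	return 0;
}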
@@ -2702,13 +2815,14 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
  * read disk capacity
  */
 static void
-sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
+sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
+		unsigned char *buffer)
 {
 	int sector_size;
 	struct scsi_device *sdp = sdkp->device;
 
 	if (sd_try_rc16_first(sdp)) {
-		sector_size = read_capacity_16(sdkp, sdp, buffer);
+		sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
 		if (sector_size == -EOVERFLOW)
 			goto got_data;
 		if (sector_size == -ENODEV)
@@ -2728,7 +2842,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
 			int old_sector_size = sector_size;
 			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
 					"Trying to use READ CAPACITY(16).\n");
-			sector_size = read_capacity_16(sdkp, sdp, buffer);
+			sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
 			if (sector_size < 0) {
 				sd_printk(KERN_NOTICE, sdkp,
 					"Using 0xffffffff as device size\n");
@@ -2787,9 +2901,8 @@ got_data:
 		 */
 		sector_size = 512;
 	}
-	blk_queue_logical_block_size(sdp->request_queue, sector_size);
-	blk_queue_physical_block_size(sdp->request_queue,
-				      sdkp->physical_block_size);
+	lim->logical_block_size = sector_size;
+	lim->physical_block_size = sdkp->physical_block_size;
 	sdkp->device->sector_size = sector_size;
 
 	if (sdkp->capacity > 0xffffffff)
@@ -3195,11 +3308,30 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
 	return;
 }
 
-/**
- * sd_read_block_limits - Query disk device for preferred I/O sizes.
- * @sdkp: disk to query
+static unsigned int sd_discard_mode(struct scsi_disk *sdkp)
+{
+	if (!sdkp->lbpvpd) {
+		/* LBP VPD page not provided */
+		if (sdkp->max_unmap_blocks)
+			return SD_LBP_UNMAP;
+		return SD_LBP_WS16;
+	}
+
+	/* LBP VPD page tells us what to use */
+	if (sdkp->lbpu && sdkp->max_unmap_blocks)
+		return SD_LBP_UNMAP;
+	if (sdkp->lbpws)
+		return SD_LBP_WS16;
+	if (sdkp->lbpws10)
+		return SD_LBP_WS10;
+	return SD_LBP_DISABLE;
+}
+
+/*
+ * Query disk device for preferred I/O sizes.
  */
-static void sd_read_block_limits(struct scsi_disk *sdkp)
+static void sd_read_block_limits(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
 	struct scsi_vpd *vpd;
 
@@ -3219,7 +3351,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
 
 		if (!sdkp->lbpme)
-			goto out;
+			goto config_atomic;
 
 		lba_count = get_unaligned_be32(&vpd->data[20]);
 		desc_count = get_unaligned_be32(&vpd->data[24]);
@@ -3233,23 +3365,16 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 		sdkp->unmap_alignment =
 			get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
 
-		if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
-
-			if (sdkp->max_unmap_blocks)
-				sd_config_discard(sdkp, SD_LBP_UNMAP);
-			else
-				sd_config_discard(sdkp, SD_LBP_WS16);
-
-		} else {	/* LBP VPD page tells us what to use */
-			if (sdkp->lbpu && sdkp->max_unmap_blocks)
-				sd_config_discard(sdkp, SD_LBP_UNMAP);
-			else if (sdkp->lbpws)
-				sd_config_discard(sdkp, SD_LBP_WS16);
-			else if (sdkp->lbpws10)
-				sd_config_discard(sdkp, SD_LBP_WS10);
-			else
-				sd_config_discard(sdkp, SD_LBP_DISABLE);
-		}
+		sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
+
+config_atomic:
+		sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]);
+		sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]);
+		sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]);
+		sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]);
+		sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]);
+
+		sd_config_atomic(sdkp, lim);
 	}
 
  out:
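The new sdkp->max_atomic* fields come straight from the Block Limits (B0h) VPD page at byte offsets 44-60, each a big-endian 32-bit word. A small decoder over a fabricated VPD payload (the sample values are arbitrary; get_be32() stands in for the kernel's get_unaligned_be32()):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's get_unaligned_be32(). */
static uint32_t get_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

int main(void)
{
	/* A fabricated 64-byte Block Limits (B0h) VPD payload; only the
	 * atomic-write fields at offsets 44..63 are populated here. */
	uint8_t vpd[64] = { 0 };

	vpd[44 + 3] = 96;	/* MAXIMUM ATOMIC TRANSFER LENGTH */
	vpd[48 + 3] = 0;	/* ATOMIC ALIGNMENT */
	vpd[52 + 3] = 8;	/* ATOMIC GRANULARITY */

	printf("max_atomic         = %u\n", get_be32(&vpd[44]));
	printf("atomic_alignment   = %u\n", get_be32(&vpd[48]));
	printf("atomic_granularity = %u\n", get_be32(&vpd[52]));
	printf("max_with_boundary  = %u\n", get_be32(&vpd[56]));
	printf("max_boundary       = %u\n", get_be32(&vpd[60]));
	return 0;
}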
@@ -3268,13 +3393,10 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
 	rcu_read_unlock();
 }
 
-/**
- * sd_read_block_characteristics - Query block dev. characteristics
- * @sdkp: disk to query
- */
-static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+/* Query block device characteristics */
+static void sd_read_block_characteristics(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
-	struct request_queue *q = sdkp->disk->queue;
 	struct scsi_vpd *vpd;
 	u16 rot;
 
@@ -3290,37 +3412,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	sdkp->zoned = (vpd->data[8] >> 4) & 3;
 	rcu_read_unlock();
 
-	if (rot == 1) {
-		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-	}
-
-
-#ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */
-	if (sdkp->device->type == TYPE_ZBC) {
-		/*
-		 * Host-managed.
-		 */
-		disk_set_zoned(sdkp->disk);
-
-		/*
-		 * Per ZBC and ZAC specifications, writes in sequential write
-		 * required zones of host-managed devices must be aligned to
-		 * the device physical block size.
-		 */
-		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
-	} else {
-		/*
-		 * Host-aware devices are treated as conventional.
-		 */
-		WARN_ON_ONCE(blk_queue_is_zoned(q));
-	}
-#endif /* CONFIG_BLK_DEV_ZONED */
+	if (rot == 1)
+		lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
 
 	if (!sdkp->first_scan)
 		return;
 
-	if (blk_queue_is_zoned(q))
+	if (sdkp->device->type == TYPE_ZBC)
 		sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
 	else if (sdkp->zoned == 1)
 		sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
@@ -3601,10 +3699,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
-	struct request_queue *q = sdkp->disk->queue;
 	sector_t old_capacity = sdkp->capacity;
+	struct queue_limits lim;
 	unsigned char *buffer;
-	unsigned int dev_max, rw_max;
+	unsigned int dev_max;
+	int err;
 
 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
 				      "sd_revalidate_disk\n"));
@@ -3625,12 +3724,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
 
 	sd_spinup_disk(sdkp);
 
+	lim = queue_limits_start_update(sdkp->disk->queue);
+
 	/*
 	 * Without media there is no reason to ask; moreover, some devices
 	 * react badly if we do.
 	 */
 	if (sdkp->media_present) {
-		sd_read_capacity(sdkp, buffer);
+		sd_read_capacity(sdkp, &lim, buffer);
 		/*
 		 * Some USB/UAS devices return generic values for mode pages
 		 * until the media has been accessed. Trigger a READ operation
@@ -3644,15 +3745,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
 		 * cause this to be updated correctly and any device which
 		 * doesn't support it should be treated as rotational.
 		 */
-		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
-		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+		lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
 
 		if (scsi_device_supports_vpd(sdp)) {
 			sd_read_block_provisioning(sdkp);
-			sd_read_block_limits(sdkp);
+			sd_read_block_limits(sdkp, &lim);
 			sd_read_block_limits_ext(sdkp);
-			sd_read_block_characteristics(sdkp);
-			sd_zbc_read_zones(sdkp, buffer);
+			sd_read_block_characteristics(sdkp, &lim);
+			sd_zbc_read_zones(sdkp, &lim, buffer);
 			sd_read_cpr(sdkp);
 		}
 
@@ -3664,62 +3764,50 @@ static int sd_revalidate_disk(struct gendisk *disk)
 		sd_read_app_tag_own(sdkp, buffer);
 		sd_read_write_same(sdkp, buffer);
 		sd_read_security(sdkp, buffer);
-		sd_config_protection(sdkp);
+		sd_config_protection(sdkp, &lim);
 	}
 
 	/*
 	 * We now have all cache related info, determine how we deal
 	 * with flush requests.
 	 */
-	sd_set_flush_flag(sdkp);
+	sd_set_flush_flag(sdkp, &lim);
 
 	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
 	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
 
 	/* Some devices report a maximum block count for READ/WRITE requests. */
 	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
-	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+	lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
 
 	if (sd_validate_min_xfer_size(sdkp))
-		blk_queue_io_min(sdkp->disk->queue,
-				 logical_to_bytes(sdp, sdkp->min_xfer_blocks));
+		lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
 	else
-		blk_queue_io_min(sdkp->disk->queue, 0);
-
-	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
-		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
-		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
-	} else {
-		q->limits.io_opt = 0;
-		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
-				      (sector_t)BLK_DEF_MAX_SECTORS_CAP);
-	}
+		lim.io_min = 0;
 
 	/*
 	 * Limit default to SCSI host optimal sector limit if set. There may be
 	 * an impact on performance for when the size of a request exceeds this
 	 * host limit.
 	 */
-	rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
-
-	/* Do not exceed controller limit */
-	rw_max = min(rw_max, queue_max_hw_sectors(q));
-
-	/*
-	 * Only update max_sectors if previously unset or if the current value
-	 * exceeds the capabilities of the hardware.
-	 */
-	if (sdkp->first_scan ||
-	    q->limits.max_sectors > q->limits.max_dev_sectors ||
-	    q->limits.max_sectors > q->limits.max_hw_sectors)
-		q->limits.max_sectors = rw_max;
+	lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
+	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
+		lim.io_opt = min_not_zero(lim.io_opt,
+				logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
+	}
 
 	sdkp->first_scan = 0;
 
 	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
-	sd_config_write_same(sdkp);
+	sd_config_write_same(sdkp, &lim);
 	kfree(buffer);
 
+	blk_mq_freeze_queue(sdkp->disk->queue);
+	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+	blk_mq_unfreeze_queue(sdkp->disk->queue);
+	if (err)
+		return err;
+
 	/*
 	 * For a zoned drive, revalidating the zones can be done only once
 	 * the gendisk capacity is set. So if this fails, set back the gendisk
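Note the simplification at the end of sd_revalidate_disk(): the old rw_max/max_sectors bookkeeping is dropped, and io_opt now starts from the host's opt_sectors and is only clamped by a valid OPTIMAL TRANSFER LENGTH. A toy calculation of the committed io_opt under assumed inputs, with min_not_zero() reimplemented locally (zero meaning "no limit"):

#include <stdio.h>

#define SECTOR_SHIFT 9

/* Local stand-in for the kernel's min_not_zero(). */
static unsigned long min_not_zero(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical inputs: host caps optimal I/O at 1024 sectors, the
	 * disk reports an OPTIMAL TRANSFER LENGTH of 2048 512-byte blocks. */
	unsigned long host_opt_sectors = 1024;
	unsigned long logical_block_size = 512;
	unsigned long opt_xfer_blocks = 2048;

	unsigned long io_opt = host_opt_sectors << SECTOR_SHIFT;

	io_opt = min_not_zero(io_opt, opt_xfer_blocks * logical_block_size);
	printf("io_opt = %lu bytes\n", io_opt); /* min(524288, 1048576) */
	return 0;
}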