Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c | 638
1 file changed, 419 insertions(+), 219 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9ed1ebcb7443..51ad2ad07e43 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -23,7 +23,7 @@ #include <linux/blk-mq.h> #include <linux/blk-integrity.h> #include <linux/ratelimit.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -32,7 +32,7 @@ #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> -#include <scsi/scsi_transport.h> /* __scsi_init_queue() */ +#include <scsi/scsi_transport.h> /* scsi_init_limits() */ #include <scsi/scsi_dh.h> #include <trace/events/scsi.h> @@ -122,11 +122,9 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs) WARN_ON_ONCE(true); } - if (msecs) { - blk_mq_requeue_request(rq, false); + blk_mq_requeue_request(rq, false); + if (!scsi_host_in_recovery(cmd->device->host)) blk_mq_delay_kick_requeue_list(rq->q, msecs); - } else - blk_mq_requeue_request(rq, true); } /** @@ -165,7 +163,8 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) */ cmd->result = 0; - blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true); + blk_mq_requeue_request(scsi_cmd_to_rq(cmd), + !scsi_host_in_recovery(cmd->device->host)); } /** @@ -185,61 +184,157 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) __scsi_queue_insert(cmd, reason, true); } +/** + * scsi_failures_reset_retries - reset all failures to zero + * @failures: &struct scsi_failures with specific failure modes set + */ +void scsi_failures_reset_retries(struct scsi_failures *failures) +{ + struct scsi_failure *failure; + + failures->total_retries = 0; + + for (failure = failures->failure_definitions; failure->result; + failure++) + failure->retries = 0; +} +EXPORT_SYMBOL_GPL(scsi_failures_reset_retries); /** - * __scsi_execute - insert request and wait for the result - * @sdev: scsi device + * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry. + * @scmd: scsi_cmnd to check. + * @failures: scsi_failures struct that lists failures to check for. + * + * Returns -EAGAIN if the caller should retry else 0. 
+ */ +static int scsi_check_passthrough(struct scsi_cmnd *scmd, + struct scsi_failures *failures) +{ + struct scsi_failure *failure; + struct scsi_sense_hdr sshdr; + enum sam_status status; + + if (!scmd->result) + return 0; + + if (!failures) + return 0; + + for (failure = failures->failure_definitions; failure->result; + failure++) { + if (failure->result == SCMD_FAILURE_RESULT_ANY) + goto maybe_retry; + + if (host_byte(scmd->result) && + host_byte(scmd->result) == host_byte(failure->result)) + goto maybe_retry; + + status = status_byte(scmd->result); + if (!status) + continue; + + if (failure->result == SCMD_FAILURE_STAT_ANY && + !scsi_status_is_good(scmd->result)) + goto maybe_retry; + + if (status != status_byte(failure->result)) + continue; + + if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION || + failure->sense == SCMD_FAILURE_SENSE_ANY) + goto maybe_retry; + + if (!scsi_command_normalize_sense(scmd, &sshdr)) + return 0; + + if (failure->sense != sshdr.sense_key) + continue; + + if (failure->asc == SCMD_FAILURE_ASC_ANY) + goto maybe_retry; + + if (failure->asc != sshdr.asc) + continue; + + if (failure->ascq == SCMD_FAILURE_ASCQ_ANY || + failure->ascq == sshdr.ascq) + goto maybe_retry; + } + + return 0; + +maybe_retry: + if (failure->allowed) { + if (failure->allowed == SCMD_FAILURE_NO_LIMIT || + ++failure->retries <= failure->allowed) + return -EAGAIN; + } else { + if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT || + ++failures->total_retries <= failures->total_allowed) + return -EAGAIN; + } + + return 0; +} + +/** + * scsi_execute_cmd - insert request and wait for the result + * @sdev: scsi_device * @cmd: scsi command - * @data_direction: data direction + * @opf: block layer request cmd_flags * @buffer: data buffer * @bufflen: len of buffer - * @sense: optional sense buffer - * @sshdr: optional decoded sense header * @timeout: request timeout in HZ - * @retries: number of times to retry request - * @flags: flags for ->cmd_flags - * @rq_flags: flags for ->rq_flags - * @resid: optional residual length + * @ml_retries: number of times SCSI midlayer will retry request + * @args: Optional args. See struct definition for field descriptions * * Returns the scsi_cmnd result field if a command was executed, or a negative * Linux error code if we didn't get that far. */ -int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, - int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, struct scsi_sense_hdr *sshdr, - int timeout, int retries, blk_opf_t flags, - req_flags_t rq_flags, int *resid) +int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd, + blk_opf_t opf, void *buffer, unsigned int bufflen, + int timeout, int ml_retries, + const struct scsi_exec_args *args) { + static const struct scsi_exec_args default_args; struct request *req; struct scsi_cmnd *scmd; int ret; - req = scsi_alloc_request(sdev->request_queue, - data_direction == DMA_TO_DEVICE ? - REQ_OP_DRV_OUT : REQ_OP_DRV_IN, - rq_flags & RQF_PM ? 
BLK_MQ_REQ_PM : 0); + if (!args) + args = &default_args; + else if (WARN_ON_ONCE(args->sense && + args->sense_len != SCSI_SENSE_BUFFERSIZE)) + return -EINVAL; + +retry: + req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags); if (IS_ERR(req)) return PTR_ERR(req); if (bufflen) { - ret = blk_rq_map_kern(sdev->request_queue, req, - buffer, bufflen, GFP_NOIO); + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO); if (ret) goto out; } scmd = blk_mq_rq_to_pdu(req); scmd->cmd_len = COMMAND_SIZE(cmd[0]); memcpy(scmd->cmnd, cmd, scmd->cmd_len); - scmd->allowed = retries; + scmd->allowed = ml_retries; + scmd->flags |= args->scmd_flags; req->timeout = timeout; - req->cmd_flags |= flags; - req->rq_flags |= rq_flags | RQF_QUIET; + req->rq_flags |= RQF_QUIET; /* * head injection *required* here otherwise quiesce won't work */ blk_execute_rq(req, true); + if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) { + blk_mq_free_request(req); + goto retry; + } + /* * Some devices (USB mass-storage in particular) may transfer * garbage data together with a residue indicating that the data @@ -249,20 +344,21 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen)) memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len); - if (resid) - *resid = scmd->resid_len; - if (sense && scmd->sense_len) - memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); - if (sshdr) + if (args->resid) + *args->resid = scmd->resid_len; + if (args->sense) + memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); + if (args->sshdr) scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len, - sshdr); + args->sshdr); + ret = scmd->result; out: blk_mq_free_request(req); return ret; } -EXPORT_SYMBOL(__scsi_execute); +EXPORT_SYMBOL(scsi_execute_cmd); /* * Wake up the error handler if necessary. Avoid as follows that the error @@ -280,9 +376,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) rcu_read_lock(); __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); if (unlikely(scsi_host_in_recovery(shost))) { + unsigned int busy = scsi_host_busy(shost); + spin_lock_irqsave(shost->host_lock, flags); if (shost->host_failed || shost->host_eh_scheduled) - scsi_eh_wakeup(shost); + scsi_eh_wakeup(shost, busy); spin_unlock_irqrestore(shost->host_lock, flags); } rcu_read_unlock(); @@ -298,15 +396,11 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd) if (starget->can_queue > 0) atomic_dec(&starget->target_busy); - sbitmap_put(&sdev->budget_map, cmd->budget_token); + if (sdev->budget_map.map) + sbitmap_put(&sdev->budget_map, cmd->budget_token); cmd->budget_token = -1; } -static void scsi_kick_queue(struct request_queue *q) -{ - blk_mq_run_hw_queues(q, false); -} - /* * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with * interrupts disabled. @@ -342,7 +436,8 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) * but in most cases, we will be first. Ideally, each LU on the * target would get some limited time or requests on the target. 
*/ - scsi_kick_queue(current_sdev->request_queue); + blk_mq_run_hw_queues(current_sdev->request_queue, + shost->queuecommand_may_block); spin_lock_irqsave(shost->host_lock, flags); if (!starget->starget_sdev_user) @@ -429,7 +524,7 @@ static void scsi_starved_list_run(struct Scsi_Host *shost) continue; spin_unlock_irqrestore(shost->host_lock, flags); - scsi_kick_queue(slq); + blk_mq_run_hw_queues(slq, false); blk_put_queue(slq); spin_lock_irqsave(shost->host_lock, flags); @@ -454,7 +549,8 @@ static void scsi_run_queue(struct request_queue *q) if (!list_empty(&sdev->host->starved_list)) scsi_starved_list_run(sdev->host); - blk_mq_run_hw_queues(q, false); + /* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */ + blk_mq_kick_requeue_list(q); } void scsi_requeue_run_queue(struct work_struct *work) @@ -504,6 +600,9 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) static void scsi_run_queue_async(struct scsi_device *sdev) { + if (scsi_host_in_recovery(sdev->host)) + return; + if (scsi_target(sdev)->single_lun || !list_empty(&sdev->host->starved_list)) { kblockd_schedule_work(&sdev->requeue_work); @@ -539,14 +638,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (blk_update_request(req, error, bytes)) return true; - // XXX: - if (blk_queue_add_random(q)) + if (q->limits.features & BLK_FEAT_ADD_RANDOM) add_disk_randomness(req->q->disk); - if (!blk_rq_is_passthrough(req)) { - WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); - cmd->flags &= ~SCMD_INITIALIZED; - } + WARN_ON_ONCE(!blk_rq_is_passthrough(req) && + !(cmd->flags & SCMD_INITIALIZED)); + cmd->flags = 0; /* * Calling rcu_barrier() is not necessary here because the @@ -579,11 +676,6 @@ static bool scsi_end_request(struct request *req, blk_status_t error, return false; } -static inline u8 get_scsi_ml_byte(int result) -{ - return (result >> 8) & 0xff; -} - /** * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t * @result: scsi error code @@ -596,17 +688,19 @@ static blk_status_t scsi_result_to_blk_status(int result) * Check the scsi-ml byte first in case we converted a host or status * byte. */ - switch (get_scsi_ml_byte(result)) { + switch (scsi_ml_byte(result)) { case SCSIML_STAT_OK: break; case SCSIML_STAT_RESV_CONFLICT: - return BLK_STS_NEXUS; + return BLK_STS_RESV_CONFLICT; case SCSIML_STAT_NOSPC: return BLK_STS_NOSPC; case SCSIML_STAT_MED_ERROR: return BLK_STS_MEDIUM; case SCSIML_STAT_TGT_FAILURE: return BLK_STS_TARGET; + case SCSIML_STAT_DL_TIMEOUT: + return BLK_STS_DURATION_LIMIT; } switch (host_byte(result)) { @@ -778,12 +872,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) case 0x1a: /* start stop unit in progress */ case 0x1b: /* sanitize in progress */ case 0x1d: /* configuration in progress */ - case 0x24: /* depopulation in progress */ action = ACTION_DELAYED_RETRY; break; case 0x0a: /* ALUA state transition */ action = ACTION_DELAYED_REPREP; break; + /* + * Depopulation might take many hours, + * thus it is not worthwhile to retry. 
+ */ + case 0x24: /* depopulation in progress */ + case 0x25: /* depopulation restore in progress */ + fallthrough; default: action = ACTION_FAIL; break; @@ -804,6 +904,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) blk_stat = BLK_STS_ZONE_OPEN_RESOURCE; } break; + case COMPLETED: + fallthrough; default: action = ACTION_FAIL; break; @@ -1047,11 +1149,11 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) * Next, walk the list, and fill in the addresses and sizes of * each segment. */ - count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); + count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg); - if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { + if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) { unsigned int pad_len = - (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1; last_sg->length += pad_len; cmd->extra_len += pad_len; @@ -1073,7 +1175,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) if (blk_integrity_rq(rq)) { struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; - int ivecs; if (WARN_ON_ONCE(!prot_sdb)) { /* @@ -1085,20 +1186,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) goto out_free_sgtables; } - ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); - - if (sg_alloc_table_chained(&prot_sdb->table, ivecs, + if (sg_alloc_table_chained(&prot_sdb->table, + rq->nr_integrity_segments, prot_sdb->table.sgl, SCSI_INLINE_PROT_SG_CNT)) { ret = BLK_STS_RESOURCE; goto out_free_sgtables; } - count = blk_rq_map_integrity_sg(rq->q, rq->bio, - prot_sdb->table.sgl); - BUG_ON(count > ivecs); - BUG_ON(count > queue_max_integrity_segments(rq->q)); - + count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl); cmd->prot_sdb = prot_sdb; cmd->prot_sdb->table.nents = count; } @@ -1130,6 +1226,15 @@ static void scsi_initialize_rq(struct request *rq) cmd->retries = 0; } +/** + * scsi_alloc_request - allocate a block request and partially + * initialize its &scsi_cmnd + * @q: the device's request queue + * @opf: the request operation code + * @flags: block layer allocation flags + * + * Return: &struct request pointer on success or %NULL on failure + */ struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, blk_mq_req_flags_t flags) { @@ -1148,8 +1253,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request); */ static void scsi_cleanup_rq(struct request *rq) { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + cmd->flags = 0; + if (rq->rq_flags & RQF_DONTPREP) { - scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + scsi_mq_uninit_cmd(cmd); rq->rq_flags &= ~RQF_DONTPREP; } } @@ -1252,29 +1361,30 @@ static inline int scsi_dev_queue_ready(struct request_queue *q, { int token; + if (!sdev->budget_map.map) + return INT_MAX; + token = sbitmap_get(&sdev->budget_map); - if (atomic_read(&sdev->device_blocked)) { - if (token < 0) - goto out; + if (token < 0) + return -1; - if (scsi_device_busy(sdev) > 1) - goto out_dec; + if (!atomic_read(&sdev->device_blocked)) + return token; - /* - * unblock after device_blocked iterates to zero - */ - if (atomic_dec_return(&sdev->device_blocked) > 0) - goto out_dec; - SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, - "unblocking device at zero depth\n")); + /* + * Only unblock if no other commands are pending and + * if device_blocked has decreased to zero + */ + if (scsi_device_busy(sdev) > 1 || + atomic_dec_return(&sdev->device_blocked) > 0) { + sbitmap_put(&sdev->budget_map, token); + return -1; } + SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, + 
"unblocking device at zero depth\n")); + return token; -out_dec: - if (token >= 0) - sbitmap_put(&sdev->budget_map, token); -out: - return -1; } /* @@ -1424,6 +1534,14 @@ static void scsi_complete(struct request *rq) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); enum scsi_disposition disposition; + if (blk_mq_is_reserved_rq(rq)) { + /* Only pass-through requests are supported in this code path. */ + WARN_ON_ONCE(!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))); + scsi_mq_uninit_cmd(cmd); + __blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result)); + return; + } + INIT_LIST_HEAD(&cmd->eh_entry); atomic_inc(&cmd->device->iodone_cnt); @@ -1464,6 +1582,8 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; int rtn = 0; + atomic_inc(&cmd->device->iorequest_cnt); + /* check if the device is still usable */ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { /* in SDEV_DEL we error all commands. DID_NO_CONNECT @@ -1484,6 +1604,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) */ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, "queuecommand : device blocked\n")); + atomic_dec(&cmd->device->iorequest_cnt); return SCSI_MLQUEUE_DEVICE_BUSY; } @@ -1516,6 +1637,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) trace_scsi_dispatch_cmd_start(cmd); rtn = host->hostt->queuecommand(host, cmd); if (rtn) { + atomic_dec(&cmd->device->iorequest_cnt); trace_scsi_dispatch_cmd_error(cmd, rtn); if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && rtn != SCSI_MLQUEUE_TARGET_BUSY) @@ -1562,13 +1684,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req) if (in_flight) __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); - /* - * Only clear the driver-private command data if the LLD does not supply - * a function to initialize that data. - */ - if (!shost->hostt->init_cmd_priv) - memset(cmd + 1, 0, shost->hostt->cmd_size); - cmd->prot_op = SCSI_PROT_NORMAL; if (blk_rq_bytes(req)) cmd->sc_data_direction = rq_dma_dir(req); @@ -1646,7 +1761,8 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token) { struct scsi_device *sdev = q->queuedata; - sbitmap_put(&sdev->budget_map, budget_token); + if (sdev->budget_map.map) + sbitmap_put(&sdev->budget_map, budget_token); } /* @@ -1715,25 +1831,38 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, WARN_ON_ONCE(cmd->budget_token < 0); /* - * If the device is not in running state we will reject some or all - * commands. + * Bypass the SCSI device, SCSI target and SCSI host checks for + * reserved commands. */ - if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { - ret = scsi_device_state_check(sdev, req); - if (ret != BLK_STS_OK) + if (!blk_mq_is_reserved_rq(req)) { + /* + * If the device is not in running state we will reject some or + * all commands. 
+ */ + if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { + ret = scsi_device_state_check(sdev, req); + if (ret != BLK_STS_OK) + goto out_put_budget; + } + + ret = BLK_STS_RESOURCE; + if (!scsi_target_queue_ready(shost, sdev)) goto out_put_budget; + if (unlikely(scsi_host_in_recovery(shost))) { + if (cmd->flags & SCMD_FAIL_IF_RECOVERING) + ret = BLK_STS_OFFLINE; + goto out_dec_target_busy; + } + if (!scsi_host_queue_ready(q, shost, sdev, cmd)) + goto out_dec_target_busy; } - ret = BLK_STS_RESOURCE; - if (!scsi_target_queue_ready(shost, sdev)) - goto out_put_budget; - if (unlikely(scsi_host_in_recovery(shost))) { - if (cmd->flags & SCMD_FAIL_IF_RECOVERING) - ret = BLK_STS_OFFLINE; - goto out_dec_target_busy; - } - if (!scsi_host_queue_ready(q, shost, sdev, cmd)) - goto out_dec_target_busy; + /* + * Only clear the driver-private command data if the LLD does not supply + * a function to initialize that data. + */ + if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv) + memset(scsi_cmd_priv(cmd), 0, shost->hostt->cmd_size); if (!(req->rq_flags & RQF_DONTPREP)) { ret = scsi_prepare_cmd(req); @@ -1755,6 +1884,14 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, cmd->submitter = SUBMITTED_BY_BLOCK_LAYER; blk_mq_start_request(req); + if (blk_mq_is_reserved_rq(req)) { + reason = shost->hostt->queue_reserved_command(shost, cmd); + if (reason) { + ret = BLK_STS_RESOURCE; + goto out_put_budget; + } + return BLK_STS_OK; + } reason = scsi_dispatch_cmd(cmd); if (reason) { scsi_set_blocked(cmd, reason); @@ -1762,7 +1899,6 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, goto out_dec_host_busy; } - atomic_inc(&cmd->device->iorequest_cnt); return BLK_STS_OK; out_dec_host_busy: @@ -1777,7 +1913,6 @@ out_put_budget: case BLK_STS_OK: break; case BLK_STS_RESOURCE: - case BLK_STS_ZONE_RESOURCE: if (scsi_device_blocked(sdev)) ret = BLK_STS_DEV_RESOURCE; break; @@ -1872,42 +2007,40 @@ static void scsi_map_queues(struct blk_mq_tag_set *set) blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); } -void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) +void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) { struct device *dev = shost->dma_dev; - /* - * this limit is imposed by hardware restrictions - */ - blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, - SG_MAX_SEGMENTS)); + memset(lim, 0, sizeof(*lim)); + lim->max_segments = + min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS); if (scsi_host_prot_dma(shost)) { shost->sg_prot_tablesize = min_not_zero(shost->sg_prot_tablesize, (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); - blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); + lim->max_integrity_segments = shost->sg_prot_tablesize; } - blk_queue_max_hw_sectors(q, shost->max_sectors); - blk_queue_segment_boundary(q, shost->dma_boundary); - dma_set_seg_boundary(dev, shost->dma_boundary); - - blk_queue_max_segment_size(q, shost->max_segment_size); - blk_queue_virt_boundary(q, shost->virt_boundary_mask); - dma_set_max_seg_size(dev, queue_max_segment_size(q)); + lim->max_hw_sectors = shost->max_sectors; + lim->seg_boundary_mask = shost->dma_boundary; + lim->max_segment_size = shost->max_segment_size; + lim->virt_boundary_mask = shost->virt_boundary_mask; + lim->dma_alignment = max_t(unsigned int, + shost->dma_alignment, dma_get_cache_alignment() - 1); /* - * Set a reasonable default alignment: The larger of 32-byte (dword), - * which is a common minimum for 
HBAs, and the minimum DMA alignment, - * which is set by the platform. - * - * Devices that require a bigger alignment can increase it later. + * Propagate the DMA formation properties to the dma-mapping layer as + * a courtesy service to the LLDDs. This needs to check that the buses + * actually support the DMA API first, though. */ - blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); + if (dev->dma_parms) { + dma_set_seg_boundary(dev, shost->dma_boundary); + dma_set_max_seg_size(dev, shost->max_segment_size); + } } -EXPORT_SYMBOL_GPL(__scsi_init_queue); +EXPORT_SYMBOL_GPL(scsi_init_limits); static const struct blk_mq_ops scsi_mq_ops_no_commit = { .get_budget = scsi_mq_get_budget, @@ -1977,12 +2110,14 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) tag_set->ops = &scsi_mq_ops_no_commit; tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; tag_set->nr_maps = shost->nr_maps ? : 1; - tag_set->queue_depth = shost->can_queue; + tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds; + tag_set->reserved_tags = shost->nr_reserved_cmds; tag_set->cmd_size = cmd_size; tag_set->numa_node = dev_to_node(shost->dma_dev); - tag_set->flags = BLK_MQ_F_SHOULD_MERGE; - tag_set->flags |= - BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); + if (shost->hostt->tag_alloc_policy_rr) + tag_set->flags |= BLK_MQ_F_TAG_RR; + if (shost->queuecommand_may_block) + tag_set->flags |= BLK_MQ_F_BLOCKING; tag_set->driver_data = shost; if (shost->host_tagset) tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; @@ -2000,6 +2135,44 @@ void scsi_mq_free_tags(struct kref *kref) } /** + * scsi_get_internal_cmd() - Allocate an internal SCSI command. + * @sdev: SCSI device from which to allocate the command + * @data_direction: Data direction for the allocated command + * @flags: request allocation flags, e.g. BLK_MQ_REQ_RESERVED or + * BLK_MQ_REQ_NOWAIT. + * + * Allocates a SCSI command for internal LLDD use. + */ +struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev, + enum dma_data_direction data_direction, + blk_mq_req_flags_t flags) +{ + enum req_op op = data_direction == DMA_TO_DEVICE ? REQ_OP_DRV_OUT : + REQ_OP_DRV_IN; + struct scsi_cmnd *scmd; + struct request *rq; + + rq = scsi_alloc_request(sdev->request_queue, op, flags); + if (IS_ERR(rq)) + return NULL; + scmd = blk_mq_rq_to_pdu(rq); + scmd->device = sdev; + + return scmd; +} +EXPORT_SYMBOL_GPL(scsi_get_internal_cmd); + +/** + * scsi_put_internal_cmd() - Free an internal SCSI command. 
+ * @scmd: SCSI command to be freed + */ +void scsi_put_internal_cmd(struct scsi_cmnd *scmd) +{ + blk_mq_free_request(blk_mq_rq_from_pdu(scmd)); +} +EXPORT_SYMBOL_GPL(scsi_put_internal_cmd); + +/** * scsi_device_from_queue - return sdev associated with a request_queue * @q: The request queue to return the sdev from * @@ -2086,6 +2259,9 @@ int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, { unsigned char cmd[10]; unsigned char *real_buffer; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; int ret; memset(cmd, 0, sizeof(cmd)); @@ -2135,8 +2311,8 @@ int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, cmd[4] = len; } - ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, - sshdr, timeout, retries, NULL); + ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len, + timeout, retries, &exec_args); kfree(real_buffer); return ret; } @@ -2147,6 +2323,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); * @sdev: SCSI device to be queried * @dbd: set to prevent mode sense from returning block descriptors * @modepage: mode page being requested + * @subpage: sub-page of the mode page being requested * @buffer: request buffer (may not be smaller than eight bytes) * @len: length of request buffer. * @timeout: command timeout @@ -2158,15 +2335,33 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); * Returns zero if successful, or a negative error number on failure */ int -scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, +scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { unsigned char cmd[12]; int use_10_for_ms; int header_length; - int result, retry_count = retries; + int result; struct scsi_sense_hdr my_sshdr; + struct scsi_failure failure_defs[] = { + { + .sense = UNIT_ATTENTION, + .asc = SCMD_FAILURE_ASC_ANY, + .ascq = SCMD_FAILURE_ASCQ_ANY, + .allowed = retries, + .result = SAM_STAT_CHECK_CONDITION, + }, + {} + }; + struct scsi_failures failures = { + .failure_definitions = failure_defs, + }; + const struct scsi_exec_args exec_args = { + /* caller might not be interested in sense, but we need it */ + .sshdr = sshdr ? : &my_sshdr, + .failures = &failures, + }; memset(data, 0, sizeof(*data)); memset(&cmd[0], 0, 12); @@ -2174,10 +2369,9 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, dbd = sdev->set_dbd_for_ms ? 
8 : dbd; cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ cmd[2] = modepage; + cmd[3] = subpage; - /* caller might not be interested in sense, but we need it */ - if (!sshdr) - sshdr = &my_sshdr; + sshdr = exec_args.sshdr; retry: use_10_for_ms = sdev->use_10_for_ms || len > 255; @@ -2200,8 +2394,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, memset(buffer, 0, len); - result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, - sshdr, timeout, retries, NULL); + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, + timeout, retries, &exec_args); if (result < 0) return result; @@ -2228,12 +2422,6 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, goto retry; } } - if (scsi_status_is_check_condition(result) && - sshdr->sense_key == UNIT_ATTENTION && - retry_count) { - retry_count--; - goto retry; - } } return -EIO; } @@ -2281,16 +2469,19 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0, }; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; int result; /* try to eat the UNIT_ATTENTION if there are enough retries */ do { - result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, - timeout, 1, NULL); - if (sdev->removable && scsi_sense_valid(sshdr) && + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, + timeout, 1, &exec_args); + if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) && sshdr->sense_key == UNIT_ATTENTION) sdev->changed = 1; - } while (scsi_sense_valid(sshdr) && + } while (result > 0 && scsi_sense_valid(sshdr) && sshdr->sense_key == UNIT_ATTENTION && --retries); return result; @@ -2442,7 +2633,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) envp[idx++] = "SDEV_MEDIA_CHANGE=1"; break; case SDEV_EVT_INQUIRY_CHANGE_REPORTED: - scsi_rescan_device(&sdev->sdev_gendev); + scsi_rescan_device(sdev); envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; break; case SDEV_EVT_CAPACITY_CHANGE_REPORTED: @@ -2616,6 +2807,7 @@ int scsi_device_quiesce(struct scsi_device *sdev) { struct request_queue *q = sdev->request_queue; + unsigned int memflags; int err; /* @@ -2630,7 +2822,7 @@ scsi_device_quiesce(struct scsi_device *sdev) blk_set_pm_only(q); - blk_mq_freeze_queue(q); + memflags = blk_mq_freeze_queue(q); /* * Ensure that the effect of blk_set_pm_only() will be visible * for percpu_ref_tryget() callers that occur after the queue @@ -2638,7 +2830,7 @@ scsi_device_quiesce(struct scsi_device *sdev) * was called. See also https://lwn.net/Articles/573497/. */ synchronize_rcu(); - blk_mq_unfreeze_queue(q); + blk_mq_unfreeze_queue(q, memflags); mutex_lock(&sdev->state_mutex); err = scsi_device_set_state(sdev, SDEV_QUIESCE); @@ -2718,24 +2910,16 @@ void scsi_start_queue(struct scsi_device *sdev) blk_mq_unquiesce_queue(sdev->request_queue); } -static void scsi_stop_queue(struct scsi_device *sdev, bool nowait) +static void scsi_stop_queue(struct scsi_device *sdev) { /* * The atomic variable of ->queue_stopped covers that * blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue. * - * However, we still need to wait until quiesce is done - * in case that queue has been stopped. + * The caller needs to wait until quiesce is done. 
*/ - if (!cmpxchg(&sdev->queue_stopped, 0, 1)) { - if (nowait) - blk_mq_quiesce_queue_nowait(sdev->request_queue); - else - blk_mq_quiesce_queue(sdev->request_queue); - } else { - if (!nowait) - blk_mq_wait_quiesce_done(sdev->request_queue->tag_set); - } + if (!cmpxchg(&sdev->queue_stopped, 0, 1)) + blk_mq_quiesce_queue_nowait(sdev->request_queue); } /** @@ -2762,19 +2946,19 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev) * request queue. */ if (!ret) - scsi_stop_queue(sdev, true); + scsi_stop_queue(sdev); return ret; } EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); /** - * scsi_internal_device_block - try to transition to the SDEV_BLOCK state + * scsi_device_block - try to transition to the SDEV_BLOCK state * @sdev: device to block + * @data: dummy argument, ignored * - * Pause SCSI command processing on the specified device and wait until all - * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep. - * - * Returns zero if successful or a negative error code upon failure. + * Pause SCSI command processing on the specified device. Callers must wait + * until all ongoing scsi_queue_rq() calls have finished after this function + * returns. * * Note: * This routine transitions the device to the SDEV_BLOCK state (which must be @@ -2782,17 +2966,26 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); * is paused until the device leaves the SDEV_BLOCK state. See also * scsi_internal_device_unblock(). */ -static int scsi_internal_device_block(struct scsi_device *sdev) +static void scsi_device_block(struct scsi_device *sdev, void *data) { int err; + enum scsi_device_state state; mutex_lock(&sdev->state_mutex); err = __scsi_internal_device_block_nowait(sdev); + state = sdev->sdev_state; if (err == 0) - scsi_stop_queue(sdev, false); + /* + * scsi_stop_queue() must be called with the state_mutex + * held. Otherwise a simultaneous scsi_start_queue() call + * might unquiesce the queue before we quiesce it. + */ + scsi_stop_queue(sdev); + mutex_unlock(&sdev->state_mutex); - return err; + WARN_ONCE(err, "%s: failed to block %s in state %d\n", + __func__, dev_name(&sdev->sdev_gendev), state); } /** @@ -2875,36 +3068,35 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev, return ret; } -static void -device_block(struct scsi_device *sdev, void *data) -{ - int ret; - - ret = scsi_internal_device_block(sdev); - - WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n", - dev_name(&sdev->sdev_gendev), ret); -} - static int target_block(struct device *dev, void *data) { if (scsi_is_target_device(dev)) starget_for_each_device(to_scsi_target(dev), NULL, - device_block); + scsi_device_block); return 0; } +/** + * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state + * @dev: a parent device of one or more scsi_target devices + * @shost: the Scsi_Host to which this device belongs + * + * Iterate over all children of @dev, which should be scsi_target devices, + * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for + * ongoing scsi_queue_rq() calls to finish. May sleep. + * + * Note: + * @dev must not itself be a scsi_target device. 
+ */ void -scsi_target_block(struct device *dev) +scsi_block_targets(struct Scsi_Host *shost, struct device *dev) { - if (scsi_is_target_device(dev)) - starget_for_each_device(to_scsi_target(dev), NULL, - device_block); - else - device_for_each_child(dev, NULL, target_block); + WARN_ON_ONCE(scsi_is_target_device(dev)); + device_for_each_child(dev, NULL, target_block); + blk_mq_wait_quiesce_done(&shost->tag_set); } -EXPORT_SYMBOL_GPL(scsi_target_block); +EXPORT_SYMBOL_GPL(scsi_block_targets); static void device_unblock(struct scsi_device *sdev, void *data) @@ -2932,11 +3124,20 @@ scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) } EXPORT_SYMBOL_GPL(scsi_target_unblock); +/** + * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state + * @shost: device to block + * + * Pause SCSI command processing for all logical units associated with the SCSI + * host and wait until pending scsi_queue_rq() calls have finished. + * + * Returns zero if successful or a negative error code upon failure. + */ int scsi_host_block(struct Scsi_Host *shost) { struct scsi_device *sdev; - int ret = 0; + int ret; /* * Call scsi_internal_device_block_nowait so we can avoid @@ -2948,20 +3149,14 @@ scsi_host_block(struct Scsi_Host *shost) mutex_unlock(&sdev->state_mutex); if (ret) { scsi_device_put(sdev); - break; + return ret; } } - /* - * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so - * calling synchronize_rcu() once is enough. - */ - WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING); - - if (!ret) - synchronize_rcu(); + /* Wait for ongoing scsi_queue_rq() calls to finish. */ + blk_mq_wait_quiesce_done(&shost->tag_set); - return ret; + return 0; } EXPORT_SYMBOL_GPL(scsi_host_block); @@ -3019,8 +3214,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, /* Offset starting from the beginning of first page in this sg-entry */ *offset = *offset - len_complete + sg->offset; - /* Assumption: contiguous pages can be accessed as "page + i" */ - page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); + page = sg_page(sg) + (*offset >> PAGE_SHIFT); *offset &= ~PAGE_MASK; /* Bytes in this sg-entry from *offset to the end of the page */ @@ -3257,14 +3451,16 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) } EXPORT_SYMBOL(scsi_vpd_lun_id); -/* +/** * scsi_vpd_tpg_id - return a target port group identifier * @sdev: SCSI device + * @rel_id: pointer to return relative target port in if not %NULL * * Returns the Target Port Group identifier from the information - * froom VPD page 0x83 of the device. + * from VPD page 0x83 of the device. + * Optionally sets @rel_id to the relative target port on success. * - * Returns the identifier or error on failure. + * Return: the identifier or error on failure. */ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) { @@ -3320,3 +3516,7 @@ void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq) scmd->result = SAM_STAT_CHECK_CONDITION; } EXPORT_SYMBOL_GPL(scsi_build_sense); + +#ifdef CONFIG_SCSI_LIB_KUNIT_TEST +#include "scsi_lib_test.c" +#endif |
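
The hunks above replace __scsi_execute() with scsi_execute_cmd() and introduce struct scsi_failures, which scsi_check_passthrough() walks to decide whether a passthrough command should be retried inside scsi_execute_cmd(). A minimal caller sketch, modelled on the scsi_mode_sense() and scsi_test_unit_ready() conversions in this diff, is shown below; the helper name, retry budget and timeout are illustrative and not part of the patch.

#include <linux/blk-mq.h>
#include <linux/dma-direction.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Sketch: issue TEST UNIT READY, letting the midlayer retry UNIT ATTENTION. */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;
	struct scsi_failure failure_defs[] = {
		{
			/* Retry CHECK CONDITION + UNIT ATTENTION, any ASC/ASCQ. */
			.sense = UNIT_ATTENTION,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.allowed = 5,			/* per-failure retry budget */
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{}					/* terminator (.result == 0) */
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,		/* decoded sense, if any */
		.failures = &failures,		/* checked by scsi_check_passthrough() */
	};

	/* No data transfer: REQ_OP_DRV_IN with a NULL buffer and zero length. */
	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				10 * HZ, 1, &exec_args);
}

The diff also adds scsi_get_internal_cmd()/scsi_put_internal_cmd() and reserves shost->nr_reserved_cmds tags in the tag set, with scsi_queue_rq() routing reserved requests to the host's ->queue_reserved_command() hook. Assuming a host that sets nr_reserved_cmds and implements that hook, an LLDD could obtain and release such a command roughly as follows; the surrounding driver logic is omitted and purely hypothetical.

static int example_reserved_cmd(struct scsi_device *sdev)
{
	struct scsi_cmnd *scmd;

	/* Take a reserved tag; fail instead of sleeping if none is free. */
	scmd = scsi_get_internal_cmd(sdev, DMA_NONE,
				     BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (!scmd)
		return -EBUSY;

	/* ... fill in scmd->cmnd / driver-private data and submit it ... */

	scsi_put_internal_cmd(scmd);
	return 0;
}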
