diff options
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
| -rw-r--r-- | drivers/scsi/scsi_lib.c | 1755 |
1 files changed, 1065 insertions, 690 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0ba7a65e7c8d..51ad2ad07e43 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -21,8 +21,9 @@ #include <linux/hardirq.h> #include <linux/scatterlist.h> #include <linux/blk-mq.h> +#include <linux/blk-integrity.h> #include <linux/ratelimit.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -31,7 +32,7 @@ #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> -#include <scsi/scsi_transport.h> /* __scsi_init_queue() */ +#include <scsi/scsi_transport.h> /* scsi_init_limits() */ #include <scsi/scsi_dh.h> #include <trace/events/scsi.h> @@ -52,51 +53,17 @@ #define SCSI_INLINE_SG_CNT 2 #endif -static struct kmem_cache *scsi_sdb_cache; static struct kmem_cache *scsi_sense_cache; -static struct kmem_cache *scsi_sense_isadma_cache; static DEFINE_MUTEX(scsi_sense_cache_mutex); static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd); -static inline struct kmem_cache * -scsi_select_sense_cache(bool unchecked_isa_dma) -{ - return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache; -} - -static void scsi_free_sense_buffer(bool unchecked_isa_dma, - unsigned char *sense_buffer) -{ - kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma), - sense_buffer); -} - -static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma, - gfp_t gfp_mask, int numa_node) -{ - return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma), - gfp_mask, numa_node); -} - int scsi_init_sense_cache(struct Scsi_Host *shost) { - struct kmem_cache *cache; int ret = 0; mutex_lock(&scsi_sense_cache_mutex); - cache = scsi_select_sense_cache(shost->unchecked_isa_dma); - if (cache) - goto exit; - - if (shost->unchecked_isa_dma) { - scsi_sense_isadma_cache = - kmem_cache_create("scsi_sense_cache(DMA)", - SCSI_SENSE_BUFFERSIZE, 0, - SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL); - if (!scsi_sense_isadma_cache) - ret = -ENOMEM; - } else { + if (!scsi_sense_cache) { scsi_sense_cache = kmem_cache_create_usercopy("scsi_sense_cache", SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, @@ -104,18 +71,10 @@ int scsi_init_sense_cache(struct Scsi_Host *shost) if (!scsi_sense_cache) ret = -ENOMEM; } - exit: mutex_unlock(&scsi_sense_cache_mutex); return ret; } -/* - * When to reinvoke queueing after a resource shortage. It's 3 msecs to - * not change behaviour from the previous unplug mechanism, experimentation - * may prove this needs changing. - */ -#define SCSI_QUEUE_DELAY 3 - static void scsi_set_blocked(struct scsi_cmnd *cmd, int reason) { @@ -152,15 +111,20 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason) } } -static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) +static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs) { - if (cmd->request->rq_flags & RQF_DONTPREP) { - cmd->request->rq_flags &= ~RQF_DONTPREP; + struct request *rq = scsi_cmd_to_rq(cmd); + + if (rq->rq_flags & RQF_DONTPREP) { + rq->rq_flags &= ~RQF_DONTPREP; scsi_mq_uninit_cmd(cmd); } else { WARN_ON_ONCE(true); } - blk_mq_requeue_request(cmd->request, true); + + blk_mq_requeue_request(rq, false); + if (!scsi_host_in_recovery(cmd->device->host)) + blk_mq_delay_kick_requeue_list(rq->q, msecs); } /** @@ -195,11 +159,12 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) * Requeue this command. It will go before all other commands * that are already in the queue. 
Schedule requeue work under * lock such that the kblockd_schedule_work() call happens - * before blk_cleanup_queue() finishes. + * before blk_mq_destroy_queue() finishes. */ cmd->result = 0; - blk_mq_requeue_request(cmd->request, true); + blk_mq_requeue_request(scsi_cmd_to_rq(cmd), + !scsi_host_in_recovery(cmd->device->host)); } /** @@ -219,57 +184,156 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) __scsi_queue_insert(cmd, reason, true); } +/** + * scsi_failures_reset_retries - reset all failures to zero + * @failures: &struct scsi_failures with specific failure modes set + */ +void scsi_failures_reset_retries(struct scsi_failures *failures) +{ + struct scsi_failure *failure; + + failures->total_retries = 0; + + for (failure = failures->failure_definitions; failure->result; + failure++) + failure->retries = 0; +} +EXPORT_SYMBOL_GPL(scsi_failures_reset_retries); + +/** + * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry. + * @scmd: scsi_cmnd to check. + * @failures: scsi_failures struct that lists failures to check for. + * + * Returns -EAGAIN if the caller should retry else 0. + */ +static int scsi_check_passthrough(struct scsi_cmnd *scmd, + struct scsi_failures *failures) +{ + struct scsi_failure *failure; + struct scsi_sense_hdr sshdr; + enum sam_status status; + + if (!scmd->result) + return 0; + + if (!failures) + return 0; + + for (failure = failures->failure_definitions; failure->result; + failure++) { + if (failure->result == SCMD_FAILURE_RESULT_ANY) + goto maybe_retry; + + if (host_byte(scmd->result) && + host_byte(scmd->result) == host_byte(failure->result)) + goto maybe_retry; + + status = status_byte(scmd->result); + if (!status) + continue; + + if (failure->result == SCMD_FAILURE_STAT_ANY && + !scsi_status_is_good(scmd->result)) + goto maybe_retry; + + if (status != status_byte(failure->result)) + continue; + + if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION || + failure->sense == SCMD_FAILURE_SENSE_ANY) + goto maybe_retry; + + if (!scsi_command_normalize_sense(scmd, &sshdr)) + return 0; + + if (failure->sense != sshdr.sense_key) + continue; + + if (failure->asc == SCMD_FAILURE_ASC_ANY) + goto maybe_retry; + + if (failure->asc != sshdr.asc) + continue; + + if (failure->ascq == SCMD_FAILURE_ASCQ_ANY || + failure->ascq == sshdr.ascq) + goto maybe_retry; + } + + return 0; + +maybe_retry: + if (failure->allowed) { + if (failure->allowed == SCMD_FAILURE_NO_LIMIT || + ++failure->retries <= failure->allowed) + return -EAGAIN; + } else { + if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT || + ++failures->total_retries <= failures->total_allowed) + return -EAGAIN; + } + + return 0; +} /** - * __scsi_execute - insert request and wait for the result - * @sdev: scsi device + * scsi_execute_cmd - insert request and wait for the result + * @sdev: scsi_device * @cmd: scsi command - * @data_direction: data direction + * @opf: block layer request cmd_flags * @buffer: data buffer * @bufflen: len of buffer - * @sense: optional sense buffer - * @sshdr: optional decoded sense header - * @timeout: request timeout in seconds - * @retries: number of times to retry request - * @flags: flags for ->cmd_flags - * @rq_flags: flags for ->rq_flags - * @resid: optional residual length + * @timeout: request timeout in HZ + * @ml_retries: number of times SCSI midlayer will retry request + * @args: Optional args. 
See struct definition for field descriptions * * Returns the scsi_cmnd result field if a command was executed, or a negative * Linux error code if we didn't get that far. */ -int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, - int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, struct scsi_sense_hdr *sshdr, - int timeout, int retries, u64 flags, req_flags_t rq_flags, - int *resid) +int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd, + blk_opf_t opf, void *buffer, unsigned int bufflen, + int timeout, int ml_retries, + const struct scsi_exec_args *args) { + static const struct scsi_exec_args default_args; struct request *req; - struct scsi_request *rq; - int ret = DRIVER_ERROR << 24; + struct scsi_cmnd *scmd; + int ret; - req = blk_get_request(sdev->request_queue, - data_direction == DMA_TO_DEVICE ? - REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT); - if (IS_ERR(req)) - return ret; - rq = scsi_req(req); + if (!args) + args = &default_args; + else if (WARN_ON_ONCE(args->sense && + args->sense_len != SCSI_SENSE_BUFFERSIZE)) + return -EINVAL; - if (bufflen && blk_rq_map_kern(sdev->request_queue, req, - buffer, bufflen, GFP_NOIO)) - goto out; +retry: + req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags); + if (IS_ERR(req)) + return PTR_ERR(req); - rq->cmd_len = COMMAND_SIZE(cmd[0]); - memcpy(rq->cmd, cmd, rq->cmd_len); - rq->retries = retries; + if (bufflen) { + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO); + if (ret) + goto out; + } + scmd = blk_mq_rq_to_pdu(req); + scmd->cmd_len = COMMAND_SIZE(cmd[0]); + memcpy(scmd->cmnd, cmd, scmd->cmd_len); + scmd->allowed = ml_retries; + scmd->flags |= args->scmd_flags; req->timeout = timeout; - req->cmd_flags |= flags; - req->rq_flags |= rq_flags | RQF_QUIET; + req->rq_flags |= RQF_QUIET; /* * head injection *required* here otherwise quiesce won't work */ - blk_execute_rq(req->q, NULL, req, 1); + blk_execute_rq(req, true); + + if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) { + blk_mq_free_request(req); + goto retry; + } /* * Some devices (USB mass-storage in particular) may transfer @@ -277,37 +341,24 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, * is invalid. Prevent the garbage from being misinterpreted * and prevent security leaks by zeroing out the excess data. */ - if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen)) - memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len); - - if (resid) - *resid = rq->resid_len; - if (sense && rq->sense_len) - memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE); - if (sshdr) - scsi_normalize_sense(rq->sense, rq->sense_len, sshdr); - ret = rq->result; + if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen)) + memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len); + + if (args->resid) + *args->resid = scmd->resid_len; + if (args->sense) + memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); + if (args->sshdr) + scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len, + args->sshdr); + + ret = scmd->result; out: - blk_put_request(req); + blk_mq_free_request(req); return ret; } -EXPORT_SYMBOL(__scsi_execute); - -/** - * scsi_init_cmd_errh - Initialize cmd fields related to error handling. - * @cmd: command that is ready to be queued. - * - * This function has the job of initializing a number of fields related to error - * handling. Typically this will be called once for each command, as required. 
- */ -static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) -{ - scsi_set_resid(cmd, 0); - memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - if (cmd->cmd_len == 0) - cmd->cmd_len = scsi_command_size(cmd->cmnd); -} +EXPORT_SYMBOL(scsi_execute_cmd); /* * Wake up the error handler if necessary. Avoid as follows that the error @@ -325,9 +376,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) rcu_read_lock(); __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); if (unlikely(scsi_host_in_recovery(shost))) { + unsigned int busy = scsi_host_busy(shost); + spin_lock_irqsave(shost->host_lock, flags); if (shost->host_failed || shost->host_eh_scheduled) - scsi_eh_wakeup(shost); + scsi_eh_wakeup(shost, busy); spin_unlock_irqrestore(shost->host_lock, flags); } rcu_read_unlock(); @@ -343,12 +396,21 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd) if (starget->can_queue > 0) atomic_dec(&starget->target_busy); - atomic_dec(&sdev->device_busy); + if (sdev->budget_map.map) + sbitmap_put(&sdev->budget_map, cmd->budget_token); + cmd->budget_token = -1; } -static void scsi_kick_queue(struct request_queue *q) +/* + * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with + * interrupts disabled. + */ +static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data) { - blk_mq_run_hw_queues(q, false); + struct scsi_device *current_sdev = data; + + if (sdev != current_sdev) + blk_mq_run_hw_queues(sdev->request_queue, true); } /* @@ -361,7 +423,6 @@ static void scsi_kick_queue(struct request_queue *q) static void scsi_single_lun_run(struct scsi_device *current_sdev) { struct Scsi_Host *shost = current_sdev->host; - struct scsi_device *sdev, *tmp; struct scsi_target *starget = scsi_target(current_sdev); unsigned long flags; @@ -375,31 +436,19 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) * but in most cases, we will be first. Ideally, each LU on the * target would get some limited time or requests on the target. */ - scsi_kick_queue(current_sdev->request_queue); + blk_mq_run_hw_queues(current_sdev->request_queue, + shost->queuecommand_may_block); spin_lock_irqsave(shost->host_lock, flags); - if (starget->starget_sdev_user) - goto out; - list_for_each_entry_safe(sdev, tmp, &starget->devices, - same_target_siblings) { - if (sdev == current_sdev) - continue; - if (scsi_device_get(sdev)) - continue; - - spin_unlock_irqrestore(shost->host_lock, flags); - scsi_kick_queue(sdev->request_queue); - spin_lock_irqsave(shost->host_lock, flags); - - scsi_device_put(sdev); - } - out: + if (!starget->starget_sdev_user) + __starget_for_each_device(starget, current_sdev, + scsi_kick_sdev_queue); spin_unlock_irqrestore(shost->host_lock, flags); } static inline bool scsi_device_is_busy(struct scsi_device *sdev) { - if (atomic_read(&sdev->device_busy) >= sdev->queue_depth) + if (scsi_device_busy(sdev) >= sdev->queue_depth) return true; if (atomic_read(&sdev->device_blocked) > 0) return true; @@ -466,16 +515,16 @@ static void scsi_starved_list_run(struct Scsi_Host *shost) * it and the queue. Mitigate by taking a reference to the * queue and never touching the sdev again after we drop the * host lock. Note: if __scsi_remove_device() invokes - * blk_cleanup_queue() before the queue is run from this + * blk_mq_destroy_queue() before the queue is run from this * function then blk_run_queue() will return immediately since - * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. 
+ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING. */ slq = sdev->request_queue; if (!blk_get_queue(slq)) continue; spin_unlock_irqrestore(shost->host_lock, flags); - scsi_kick_queue(slq); + blk_mq_run_hw_queues(slq, false); blk_put_queue(slq); spin_lock_irqsave(shost->host_lock, flags); @@ -500,7 +549,8 @@ static void scsi_run_queue(struct request_queue *q) if (!list_empty(&sdev->host->starved_list)) scsi_starved_list_run(sdev->host); - blk_mq_run_hw_queues(q, false); + /* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */ + blk_mq_kick_requeue_list(q); } void scsi_requeue_run_queue(struct work_struct *work) @@ -523,7 +573,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost) static void scsi_uninit_cmd(struct scsi_cmnd *cmd) { - if (!blk_rq_is_passthrough(cmd->request)) { + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { struct scsi_driver *drv = scsi_cmd_to_driver(cmd); if (drv->uninit_command) @@ -531,7 +581,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) } } -static void scsi_free_sgtables(struct scsi_cmnd *cmd) +void scsi_free_sgtables(struct scsi_cmnd *cmd) { if (cmd->sdb.table.nents) sg_free_table_chained(&cmd->sdb.table, @@ -540,6 +590,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd) sg_free_table_chained(&cmd->prot_sdb->table, SCSI_INLINE_PROT_SG_CNT); } +EXPORT_SYMBOL_GPL(scsi_free_sgtables); static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) { @@ -547,6 +598,35 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) scsi_uninit_cmd(cmd); } +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + if (scsi_host_in_recovery(sdev->host)) + return; + + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) { + kblockd_schedule_work(&sdev->requeue_work); + } else { + /* + * smp_mb() present in sbitmap_queue_clear() or implied in + * .end_io is for ordering writing .device_busy in + * scsi_device_unbusy() and reading sdev->restarts. + */ + int old = atomic_read(&sdev->restarts); + + /* + * ->restarts has to be kept as non-zero if new budget + * contention occurs. + * + * No need to run queue when either another re-run + * queue wins in updating ->restarts or a new budget + * contention occurs. 
+ */ + if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old) + blk_mq_run_hw_queues(sdev->request_queue, true); + } +} + /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes) @@ -558,13 +638,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (blk_update_request(req, error, bytes)) return true; - if (blk_queue_add_random(q)) - add_disk_randomness(req->rq_disk); + if (q->limits.features & BLK_FEAT_ADD_RANDOM) + add_disk_randomness(req->q->disk); - if (!blk_rq_is_scsi(req)) { - WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); - cmd->flags &= ~SCMD_INITIALIZED; - } + WARN_ON_ONCE(!blk_rq_is_passthrough(req) && + !(cmd->flags & SCMD_INITIALIZED)); + cmd->flags = 0; /* * Calling rcu_barrier() is not necessary here because the @@ -591,11 +670,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, __blk_mq_end_request(req, error); - if (scsi_target(sdev)->single_lun || - !list_empty(&sdev->host->starved_list)) - kblockd_schedule_work(&sdev->requeue_work); - else - blk_mq_run_hw_queues(q, true); + scsi_run_queue_async(sdev); percpu_ref_put(&q->q_usage_counter); return false; @@ -603,60 +678,116 @@ static bool scsi_end_request(struct request *req, blk_status_t error, /** * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t - * @cmd: SCSI command * @result: scsi error code * - * Translate a SCSI result code into a blk_status_t value. May reset the host - * byte of @cmd->result. + * Translate a SCSI result code into a blk_status_t value. */ -static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) +static blk_status_t scsi_result_to_blk_status(int result) { + /* + * Check the scsi-ml byte first in case we converted a host or status + * byte. + */ + switch (scsi_ml_byte(result)) { + case SCSIML_STAT_OK: + break; + case SCSIML_STAT_RESV_CONFLICT: + return BLK_STS_RESV_CONFLICT; + case SCSIML_STAT_NOSPC: + return BLK_STS_NOSPC; + case SCSIML_STAT_MED_ERROR: + return BLK_STS_MEDIUM; + case SCSIML_STAT_TGT_FAILURE: + return BLK_STS_TARGET; + case SCSIML_STAT_DL_TIMEOUT: + return BLK_STS_DURATION_LIMIT; + } + switch (host_byte(result)) { case DID_OK: - /* - * Also check the other bytes than the status byte in result - * to handle the case when a SCSI LLD sets result to - * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION. - */ - if (scsi_status_is_good(result) && (result & ~0xff) == 0) + if (scsi_status_is_good(result)) return BLK_STS_OK; return BLK_STS_IOERR; case DID_TRANSPORT_FAILFAST: + case DID_TRANSPORT_MARGINAL: return BLK_STS_TRANSPORT; - case DID_TARGET_FAILURE: - set_host_byte(cmd, DID_OK); - return BLK_STS_TARGET; - case DID_NEXUS_FAILURE: - set_host_byte(cmd, DID_OK); - return BLK_STS_NEXUS; - case DID_ALLOC_FAILURE: - set_host_byte(cmd, DID_OK); - return BLK_STS_NOSPC; - case DID_MEDIUM_ERROR: - set_host_byte(cmd, DID_OK); - return BLK_STS_MEDIUM; default: return BLK_STS_IOERR; } } -/* Helper for scsi_io_completion() when "reprep" action required. */ -static void scsi_io_completion_reprep(struct scsi_cmnd *cmd, - struct request_queue *q) +/** + * scsi_rq_err_bytes - determine number of bytes till the next failure boundary + * @rq: request to examine + * + * Description: + * A request could be merge of IOs which require different failure + * handling. 
This function determines the number of bytes which + * can be failed from the beginning of the request without + * crossing into area which need to be retried further. + * + * Return: + * The number of bytes to fail. + */ +static unsigned int scsi_rq_err_bytes(const struct request *rq) +{ + blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK; + unsigned int bytes = 0; + struct bio *bio; + + if (!(rq->rq_flags & RQF_MIXED_MERGE)) + return blk_rq_bytes(rq); + + /* + * Currently the only 'mixing' which can happen is between + * different fastfail types. We can safely fail portions + * which have all the failfast bits that the first one has - + * the ones which are at least as eager to fail as the first + * one. + */ + for (bio = rq->bio; bio; bio = bio->bi_next) { + if ((bio->bi_opf & ff) != ff) + break; + bytes += bio->bi_iter.bi_size; + } + + /* this could lead to infinite loop */ + BUG_ON(blk_rq_bytes(rq) && !bytes); + return bytes; +} + +static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd) { - /* A new command will be prepared and issued. */ - scsi_mq_requeue_cmd(cmd); + struct request *req = scsi_cmd_to_rq(cmd); + unsigned long wait_for; + + if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) + return false; + + wait_for = (cmd->allowed + 1) * req->timeout; + if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { + scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n", + wait_for/HZ); + return true; + } + return false; } +/* + * When ALUA transition state is returned, reprep the cmd to + * use the ALUA handler's transition timeout. Delay the reprep + * 1 sec to avoid aggressive retries of the target in that + * state. + */ +#define ALUA_TRANSITION_REPREP_DELAY 1000 + /* Helper for scsi_io_completion() when special action required. */ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) { - struct request_queue *q = cmd->device->request_queue; - struct request *req = cmd->request; + struct request *req = scsi_cmd_to_rq(cmd); int level = 0; - enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, - ACTION_DELAYED_RETRY} action; - unsigned long wait_for = (cmd->allowed + 1) * req->timeout; + enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP, + ACTION_RETRY, ACTION_DELAYED_RETRY} action; struct scsi_sense_hdr sshdr; bool sense_valid; bool sense_current = true; /* false implies "deferred sense" */ @@ -666,7 +797,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) if (sense_valid) sense_current = !scsi_sense_is_deferred(&sshdr); - blk_stat = scsi_result_to_blk_status(cmd, result); + blk_stat = scsi_result_to_blk_status(result); if (host_byte(result) == DID_RESET) { /* Third party bus reset or reset for error recovery @@ -736,13 +867,23 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) case 0x07: /* operation in progress */ case 0x08: /* Long write in progress */ case 0x09: /* self test in progress */ + case 0x11: /* notify (enable spinup) required */ case 0x14: /* space allocation in progress */ case 0x1a: /* start stop unit in progress */ case 0x1b: /* sanitize in progress */ case 0x1d: /* configuration in progress */ - case 0x24: /* depopulation in progress */ action = ACTION_DELAYED_RETRY; break; + case 0x0a: /* ALUA state transition */ + action = ACTION_DELAYED_REPREP; + break; + /* + * Depopulation might take many hours, + * thus it is not worthwhile to retry. 
+ */ + case 0x24: /* depopulation in progress */ + case 0x25: /* depopulation restore in progress */ + fallthrough; default: action = ACTION_FAIL; break; @@ -754,6 +895,17 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) /* See SSC3rXX or current. */ action = ACTION_FAIL; break; + case DATA_PROTECT: + action = ACTION_FAIL; + if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) || + (sshdr.asc == 0x55 && + (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) { + /* Insufficient zone resources */ + blk_stat = BLK_STS_ZONE_OPEN_RESOURCE; + } + break; + case COMPLETED: + fallthrough; default: action = ACTION_FAIL; break; @@ -761,8 +913,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) } else action = ACTION_FAIL; - if (action != ACTION_FAIL && - time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) + if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd)) action = ACTION_FAIL; switch (action) { @@ -784,16 +935,19 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) */ if (!level && __ratelimit(&_rs)) { scsi_print_result(cmd, NULL, FAILED); - if (driver_byte(result) == DRIVER_SENSE) + if (sense_valid) scsi_print_sense(cmd); scsi_print_command(cmd); } } - if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req))) + if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req))) return; - /*FALLTHRU*/ + fallthrough; case ACTION_REPREP: - scsi_io_completion_reprep(cmd, q); + scsi_mq_requeue_cmd(cmd, 0); + break; + case ACTION_DELAYED_REPREP: + scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY); break; case ACTION_RETRY: /* Retry the same command immediately */ @@ -816,7 +970,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, { bool sense_valid; bool sense_current = true; /* false implies "deferred sense" */ - struct request *req = cmd->request; + struct request *req = scsi_cmd_to_rq(cmd); struct scsi_sense_hdr sshdr; sense_valid = scsi_command_normalize_sense(cmd, &sshdr); @@ -828,19 +982,18 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, /* * SG_IO wants current and deferred errors */ - scsi_req(req)->sense_len = - min(8 + cmd->sense_buffer[7], - SCSI_SENSE_BUFFERSIZE); + cmd->sense_len = min(8 + cmd->sense_buffer[7], + SCSI_SENSE_BUFFERSIZE); } if (sense_current) - *blk_statp = scsi_result_to_blk_status(cmd, result); + *blk_statp = scsi_result_to_blk_status(result); } else if (blk_rq_bytes(req) == 0 && sense_current) { /* * Flush commands do not transfers any data, and thus cannot use * good_bytes != blk_rq_bytes(req) as the signal for an error. * This sets *blk_statp explicitly for the problem case. */ - *blk_statp = scsi_result_to_blk_status(cmd, result); + *blk_statp = scsi_result_to_blk_status(result); } /* * Recovered errors need reporting, but they're always treated as @@ -872,7 +1025,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related * intermediate statuses (both obsolete in SAM-4) as good. */ - if (status_byte(result) && scsi_status_is_good(result)) { + if ((result & 0xff) && scsi_status_is_good(result)) { result = 0; *blk_statp = BLK_STS_OK; } @@ -888,7 +1041,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, * command block will be released and the queue function will be goosed. If we * are not done then we have to figure out what to do next: * - * a) We can call scsi_io_completion_reprep(). 
The request will be + * a) We can call scsi_mq_requeue_cmd(). The request will be * unprepared and put back on the queue. Then a new command will * be created for it. This should be used if we made forward * progress, or if we want to switch from READ(10) to READ(6) for @@ -904,20 +1057,12 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; - struct request_queue *q = cmd->device->request_queue; - struct request *req = cmd->request; + struct request *req = scsi_cmd_to_rq(cmd); blk_status_t blk_stat = BLK_STS_OK; if (unlikely(result)) /* a nz result may or may not be an error */ result = scsi_io_completion_nz_result(cmd, result, &blk_stat); - if (unlikely(blk_rq_is_passthrough(req))) { - /* - * scsi_result_to_blk_status may have reset the host_byte - */ - scsi_req(req)->result = cmd->result; - } - /* * Next deal with any sectors which we were able to correctly * handle. @@ -945,10 +1090,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) /* * If there had been no error, but we have leftover bytes in the - * requeues just queue the command up again. + * request just queue the command up again. */ if (likely(result == 0)) - scsi_io_completion_reprep(cmd, q); + scsi_mq_requeue_cmd(cmd, 0); else scsi_io_completion_action(cmd, result); } @@ -962,18 +1107,21 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, } /** - * scsi_init_io - SCSI I/O initialization function. - * @cmd: command descriptor we wish to initialize + * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists + * @cmd: SCSI command data structure to initialize. + * + * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled + * for @cmd. * * Returns: * * BLK_STS_OK - on success * * BLK_STS_RESOURCE - if the failure is retryable * * BLK_STS_IOERR - if the failure is fatal */ -blk_status_t scsi_init_io(struct scsi_cmnd *cmd) +blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); unsigned short nr_segs = blk_rq_nr_phys_segments(rq); struct scatterlist *last_sg = NULL; blk_status_t ret; @@ -1001,11 +1149,11 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) * Next, walk the list, and fill in the addresses and sizes of * each segment. 
*/ - count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); + count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg); - if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { + if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) { unsigned int pad_len = - (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1; last_sg->length += pad_len; cmd->extra_len += pad_len; @@ -1027,7 +1175,6 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) if (blk_integrity_rq(rq)) { struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; - int ivecs; if (WARN_ON_ONCE(!prot_sdb)) { /* @@ -1039,20 +1186,15 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) goto out_free_sgtables; } - ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); - - if (sg_alloc_table_chained(&prot_sdb->table, ivecs, + if (sg_alloc_table_chained(&prot_sdb->table, + rq->nr_integrity_segments, prot_sdb->table.sgl, SCSI_INLINE_PROT_SG_CNT)) { ret = BLK_STS_RESOURCE; goto out_free_sgtables; } - count = blk_rq_map_integrity_sg(rq->q, rq->bio, - prot_sdb->table.sgl); - BUG_ON(count > ivecs); - BUG_ON(count > queue_max_integrity_segments(rq->q)); - + count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl); cmd->prot_sdb = prot_sdb; cmd->prot_sdb->table.nents = count; } @@ -1062,7 +1204,7 @@ out_free_sgtables: scsi_free_sgtables(cmd); return ret; } -EXPORT_SYMBOL(scsi_init_io); +EXPORT_SYMBOL(scsi_alloc_sgtables); /** * scsi_initialize_rq - initialize struct scsi_cmnd partially @@ -1071,28 +1213,52 @@ EXPORT_SYMBOL(scsi_init_io); * This function initializes the members of struct scsi_cmnd that must be * initialized before request processing starts and that won't be * reinitialized if a SCSI command is requeued. - * - * Called from inside blk_get_request() for pass-through requests and from - * inside scsi_init_command() for filesystem requests. */ static void scsi_initialize_rq(struct request *rq) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); - scsi_req_init(&cmd->req); + memset(cmd->cmnd, 0, sizeof(cmd->cmnd)); + cmd->cmd_len = MAX_COMMAND_SIZE; + cmd->sense_len = 0; init_rcu_head(&cmd->rcu); cmd->jiffies_at_alloc = jiffies; cmd->retries = 0; } +/** + * scsi_alloc_request - allocate a block request and partially + * initialize its &scsi_cmnd + * @q: the device's request queue + * @opf: the request operation code + * @flags: block layer allocation flags + * + * Return: &struct request pointer on success or %NULL on failure + */ +struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, + blk_mq_req_flags_t flags) +{ + struct request *rq; + + rq = blk_mq_alloc_request(q, opf, flags); + if (!IS_ERR(rq)) + scsi_initialize_rq(rq); + return rq; +} +EXPORT_SYMBOL_GPL(scsi_alloc_request); + /* * Only called when the request isn't completed by SCSI, and not freed by * SCSI */ static void scsi_cleanup_rq(struct request *rq) { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + cmd->flags = 0; + if (rq->rq_flags & RQF_DONTPREP) { - scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + scsi_mq_uninit_cmd(cmd); rq->rq_flags &= ~RQF_DONTPREP; } } @@ -1100,42 +1266,16 @@ static void scsi_cleanup_rq(struct request *rq) /* Called before a request is prepared. See also scsi_mq_prep_fn(). 
*/ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) { - void *buf = cmd->sense_buffer; - void *prot = cmd->prot_sdb; - struct request *rq = blk_mq_rq_from_pdu(cmd); - unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS; - unsigned long jiffies_at_alloc; - int retries, to_clear; - bool in_flight; + struct request *rq = scsi_cmd_to_rq(cmd); - if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) { - flags |= SCMD_INITIALIZED; + if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) { + cmd->flags |= SCMD_INITIALIZED; scsi_initialize_rq(rq); } - jiffies_at_alloc = cmd->jiffies_at_alloc; - retries = cmd->retries; - in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state); - /* - * Zero out the cmd, except for the embedded scsi_request. Only clear - * the driver-private command data if the LLD does not supply a - * function to initialize that data. - */ - to_clear = sizeof(*cmd) - sizeof(cmd->req); - if (!dev->host->hostt->init_cmd_priv) - to_clear += dev->host->hostt->cmd_size; - memset((char *)cmd + sizeof(cmd->req), 0, to_clear); - cmd->device = dev; - cmd->sense_buffer = buf; - cmd->prot_sdb = prot; - cmd->flags = flags; + INIT_LIST_HEAD(&cmd->eh_entry); INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); - cmd->jiffies_at_alloc = jiffies_at_alloc; - cmd->retries = retries; - if (in_flight) - __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); - } static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, @@ -1150,7 +1290,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, * submit a request without an attached bio. */ if (req->bio) { - blk_status_t ret = scsi_init_io(cmd); + blk_status_t ret = scsi_alloc_sgtables(cmd); if (unlikely(ret != BLK_STS_OK)) return ret; } else { @@ -1159,61 +1299,16 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, memset(&cmd->sdb, 0, sizeof(cmd->sdb)); } - cmd->cmd_len = scsi_req(req)->cmd_len; - cmd->cmnd = scsi_req(req)->cmd; cmd->transfersize = blk_rq_bytes(req); - cmd->allowed = scsi_req(req)->retries; return BLK_STS_OK; } -/* - * Setup a normal block command. These are simple request from filesystems - * that still need to be translated to SCSI CDBs from the ULD. 
- */ -static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev, - struct request *req) -{ - struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); - - if (unlikely(sdev->handler && sdev->handler->prep_fn)) { - blk_status_t ret = sdev->handler->prep_fn(sdev, req); - if (ret != BLK_STS_OK) - return ret; - } - - cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd; - memset(cmd->cmnd, 0, BLK_MAX_CDB); - return scsi_cmd_to_driver(cmd)->init_command(cmd); -} - -static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, - struct request *req) -{ - struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); - blk_status_t ret; - - if (!blk_rq_bytes(req)) - cmd->sc_data_direction = DMA_NONE; - else if (rq_data_dir(req) == WRITE) - cmd->sc_data_direction = DMA_TO_DEVICE; - else - cmd->sc_data_direction = DMA_FROM_DEVICE; - - if (blk_rq_is_scsi(req)) - ret = scsi_setup_scsi_cmnd(sdev, req); - else - ret = scsi_setup_fs_cmnd(sdev, req); - - if (ret != BLK_STS_OK) - scsi_free_sgtables(cmd); - - return ret; -} - static blk_status_t -scsi_prep_state_check(struct scsi_device *sdev, struct request *req) +scsi_device_state_check(struct scsi_device *sdev, struct request *req) { switch (sdev->sdev_state) { + case SDEV_CREATED: + return BLK_STS_OK; case SDEV_OFFLINE: case SDEV_TRANSPORT_OFFLINE: /* @@ -1240,55 +1335,56 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req) return BLK_STS_RESOURCE; case SDEV_QUIESCE: /* - * If the devices is blocked we defer normal commands. + * If the device is blocked we only accept power management + * commands. */ - if (req && !(req->rq_flags & RQF_PREEMPT)) + if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) return BLK_STS_RESOURCE; return BLK_STS_OK; default: /* * For any other not fully online state we only allow - * special commands. In particular any user initiated - * command is not allowed. + * power management commands. */ - if (req && !(req->rq_flags & RQF_PREEMPT)) - return BLK_STS_IOERR; + if (req && !(req->rq_flags & RQF_PM)) + return BLK_STS_OFFLINE; return BLK_STS_OK; } } /* - * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else - * return 0. - * - * Called with the queue_lock held. + * scsi_dev_queue_ready: if we can send requests to sdev, assign one token + * and return the token else return -1. 
*/ static inline int scsi_dev_queue_ready(struct request_queue *q, struct scsi_device *sdev) { - unsigned int busy; + int token; - busy = atomic_inc_return(&sdev->device_busy) - 1; - if (atomic_read(&sdev->device_blocked)) { - if (busy) - goto out_dec; + if (!sdev->budget_map.map) + return INT_MAX; - /* - * unblock after device_blocked iterates to zero - */ - if (atomic_dec_return(&sdev->device_blocked) > 0) - goto out_dec; - SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, - "unblocking device at zero depth\n")); + token = sbitmap_get(&sdev->budget_map); + if (token < 0) + return -1; + + if (!atomic_read(&sdev->device_blocked)) + return token; + + /* + * Only unblock if no other commands are pending and + * if device_blocked has decreased to zero + */ + if (scsi_device_busy(sdev) > 1 || + atomic_dec_return(&sdev->device_blocked) > 0) { + sbitmap_put(&sdev->budget_map, token); + return -1; } - if (busy >= sdev->queue_depth) - goto out_dec; + SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, + "unblocking device at zero depth\n")); - return 1; -out_dec: - atomic_dec(&sdev->device_busy); - return 0; + return token; } /* @@ -1355,9 +1451,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q, struct scsi_device *sdev, struct scsi_cmnd *cmd) { - if (scsi_host_in_recovery(shost)) - return 0; - if (atomic_read(&shost->host_blocked) > 0) { if (scsi_host_busy(shost) > 0) goto starved; @@ -1432,11 +1525,22 @@ static bool scsi_mq_lld_busy(struct request_queue *q) return false; } -static void scsi_softirq_done(struct request *rq) +/* + * Block layer request completion callback. May be called from interrupt + * context. + */ +static void scsi_complete(struct request *rq) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); - unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; - int disposition; + enum scsi_disposition disposition; + + if (blk_mq_is_reserved_rq(rq)) { + /* Only pass-through requests are supported in this code path. */ + WARN_ON_ONCE(!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))); + scsi_mq_uninit_cmd(cmd); + __blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result)); + return; + } INIT_LIST_HEAD(&cmd->eh_entry); @@ -1445,34 +1549,29 @@ static void scsi_softirq_done(struct request *rq) atomic_inc(&cmd->device->ioerr_cnt); disposition = scsi_decide_disposition(cmd); - if (disposition != SUCCESS && - time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { - scmd_printk(KERN_ERR, cmd, - "timing out command, waited %lus\n", - wait_for/HZ); + if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd)) disposition = SUCCESS; - } scsi_log_completion(cmd, disposition); switch (disposition) { - case SUCCESS: - scsi_finish_command(cmd); - break; - case NEEDS_RETRY: - scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); - break; - case ADD_TO_MLQUEUE: - scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); - break; - default: - scsi_eh_scmd_add(cmd); - break; + case SUCCESS: + scsi_finish_command(cmd); + break; + case NEEDS_RETRY: + scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); + break; + case ADD_TO_MLQUEUE: + scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); + break; + default: + scsi_eh_scmd_add(cmd); + break; } } /** - * scsi_dispatch_command - Dispatch a command to the low-level driver. + * scsi_dispatch_cmd - Dispatch a command to the low-level driver. * @cmd: command block we are dispatching. 
* * Return: nonzero return request was rejected and device's queue needs to be @@ -1505,6 +1604,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) */ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, "queuecommand : device blocked\n")); + atomic_dec(&cmd->device->iorequest_cnt); return SCSI_MLQUEUE_DEVICE_BUSY; } @@ -1537,6 +1637,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) trace_scsi_dispatch_cmd_start(cmd); rtn = host->hostt->queuecommand(host, cmd); if (rtn) { + atomic_dec(&cmd->device->iorequest_cnt); trace_scsi_dispatch_cmd_error(cmd, rtn); if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && rtn != SCSI_MLQUEUE_TARGET_BUSY) @@ -1548,7 +1649,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) return rtn; done: - cmd->scsi_done(cmd); + scsi_done(cmd); return 0; } @@ -1559,18 +1660,35 @@ static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) sizeof(struct scatterlist); } -static blk_status_t scsi_mq_prep_fn(struct request *req) +static blk_status_t scsi_prepare_cmd(struct request *req) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); struct scsi_device *sdev = req->q->queuedata; struct Scsi_Host *shost = sdev->host; + bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state); struct scatterlist *sg; scsi_init_command(sdev, cmd); - cmd->request = req; - cmd->tag = req->tag; + cmd->eh_eflags = 0; + cmd->prot_type = 0; + cmd->prot_flags = 0; + cmd->submitter = 0; + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + cmd->underflow = 0; + cmd->transfersize = 0; + cmd->host_scribble = NULL; + cmd->result = 0; + cmd->extra_len = 0; + cmd->state = 0; + if (in_flight) + __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); + cmd->prot_op = SCSI_PROT_NORMAL; + if (blk_rq_bytes(req)) + cmd->sc_data_direction = rq_dma_dir(req); + else + cmd->sc_data_direction = DMA_NONE; sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; cmd->sdb.table.sgl = sg; @@ -1582,41 +1700,121 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) (struct scatterlist *)(cmd->prot_sdb + 1); } - blk_mq_start_request(req); + /* + * Special handling for passthrough commands, which don't go to the ULP + * at all: + */ + if (blk_rq_is_passthrough(req)) + return scsi_setup_scsi_cmnd(sdev, req); - return scsi_setup_cmnd(sdev, req); + if (sdev->handler && sdev->handler->prep_fn) { + blk_status_t ret = sdev->handler->prep_fn(sdev, req); + + if (ret != BLK_STS_OK) + return ret; + } + + /* Usually overridden by the ULP */ + cmd->allowed = 0; + memset(cmd->cmnd, 0, sizeof(cmd->cmnd)); + return scsi_cmd_to_driver(cmd)->init_command(cmd); } -static void scsi_mq_done(struct scsi_cmnd *cmd) +static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly) { + struct request *req = scsi_cmd_to_rq(cmd); + + switch (cmd->submitter) { + case SUBMITTED_BY_BLOCK_LAYER: + break; + case SUBMITTED_BY_SCSI_ERROR_HANDLER: + return scsi_eh_done(cmd); + case SUBMITTED_BY_SCSI_RESET_IOCTL: + return; + } + + if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q))) + return; if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) return; trace_scsi_dispatch_cmd_done(cmd); - /* - * If the block layer didn't complete the request due to a timeout - * injection, scsi must clear its internal completed state so that the - * timeout handler will see it needs to escalate its own error - * recovery. 
- */ - if (unlikely(!blk_mq_complete_request(cmd->request))) - clear_bit(SCMD_STATE_COMPLETE, &cmd->state); + if (complete_directly) + blk_mq_complete_request_direct(req, scsi_complete); + else + blk_mq_complete_request(req); } -static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) +void scsi_done(struct scsi_cmnd *cmd) +{ + scsi_done_internal(cmd, false); +} +EXPORT_SYMBOL(scsi_done); + +void scsi_done_direct(struct scsi_cmnd *cmd) +{ + scsi_done_internal(cmd, true); +} +EXPORT_SYMBOL(scsi_done_direct); + +static void scsi_mq_put_budget(struct request_queue *q, int budget_token) { - struct request_queue *q = hctx->queue; struct scsi_device *sdev = q->queuedata; - atomic_dec(&sdev->device_busy); + if (sdev->budget_map.map) + sbitmap_put(&sdev->budget_map, budget_token); } -static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) +/* + * When to reinvoke queueing after a resource shortage. It's 3 msecs to + * not change behaviour from the previous unplug mechanism, experimentation + * may prove this needs changing. + */ +#define SCSI_QUEUE_DELAY 3 + +static int scsi_mq_get_budget(struct request_queue *q) { - struct request_queue *q = hctx->queue; struct scsi_device *sdev = q->queuedata; + int token = scsi_dev_queue_ready(q, sdev); + + if (token >= 0) + return token; + + atomic_inc(&sdev->restarts); + + /* + * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy). + * .restarts must be incremented before .device_busy is read because the + * code in scsi_run_queue_async() depends on the order of these operations. + */ + smp_mb__after_atomic(); - return scsi_dev_queue_ready(q, sdev); + /* + * If all in-flight requests originated from this LUN are completed + * before reading .device_busy, sdev->device_busy will be observed as + * zero, then blk_mq_delay_run_hw_queues() will dispatch this request + * soon. Otherwise, completion of one of these requests will observe + * the .restarts flag, and the request queue will be run for handling + * this request, see scsi_end_request(). + */ + if (unlikely(scsi_device_busy(sdev) == 0 && + !scsi_device_blocked(sdev))) + blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY); + return -1; +} + +static void scsi_mq_set_rq_budget_token(struct request *req, int token) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + + cmd->budget_token = token; +} + +static int scsi_mq_get_rq_budget_token(struct request *req) +{ + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + + return cmd->budget_token; } static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -1630,30 +1828,49 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, blk_status_t ret; int reason; + WARN_ON_ONCE(cmd->budget_token < 0); + /* - * If the device is not in running state we will reject some or all - * commands. + * Bypass the SCSI device, SCSI target and SCSI host checks for + * reserved commands. */ - if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { - ret = scsi_prep_state_check(sdev, req); - if (ret != BLK_STS_OK) + if (!blk_mq_is_reserved_rq(req)) { + /* + * If the device is not in running state we will reject some or + * all commands. 
+ */ + if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { + ret = scsi_device_state_check(sdev, req); + if (ret != BLK_STS_OK) + goto out_put_budget; + } + + ret = BLK_STS_RESOURCE; + if (!scsi_target_queue_ready(shost, sdev)) goto out_put_budget; + if (unlikely(scsi_host_in_recovery(shost))) { + if (cmd->flags & SCMD_FAIL_IF_RECOVERING) + ret = BLK_STS_OFFLINE; + goto out_dec_target_busy; + } + if (!scsi_host_queue_ready(q, shost, sdev, cmd)) + goto out_dec_target_busy; } - ret = BLK_STS_RESOURCE; - if (!scsi_target_queue_ready(shost, sdev)) - goto out_put_budget; - if (!scsi_host_queue_ready(q, shost, sdev, cmd)) - goto out_dec_target_busy; + /* + * Only clear the driver-private command data if the LLD does not supply + * a function to initialize that data. + */ + if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv) + memset(scsi_cmd_priv(cmd), 0, shost->hostt->cmd_size); if (!(req->rq_flags & RQF_DONTPREP)) { - ret = scsi_mq_prep_fn(req); + ret = scsi_prepare_cmd(req); if (ret != BLK_STS_OK) goto out_dec_host_busy; req->rq_flags |= RQF_DONTPREP; } else { clear_bit(SCMD_STATE_COMPLETE, &cmd->state); - blk_mq_start_request(req); } cmd->flags &= SCMD_PRESERVED_FLAGS; @@ -1662,9 +1879,19 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, if (bd->last) cmd->flags |= SCMD_LAST; - scsi_init_cmd_errh(cmd); - cmd->scsi_done = scsi_mq_done; + scsi_set_resid(cmd, 0); + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + cmd->submitter = SUBMITTED_BY_BLOCK_LAYER; + blk_mq_start_request(req); + if (blk_mq_is_reserved_rq(req)) { + reason = shost->hostt->queue_reserved_command(shost, cmd); + if (reason) { + ret = BLK_STS_RESOURCE; + goto out_put_budget; + } + return BLK_STS_OK; + } reason = scsi_dispatch_cmd(cmd); if (reason) { scsi_set_blocked(cmd, reason); @@ -1680,21 +1907,25 @@ out_dec_target_busy: if (scsi_target(sdev)->can_queue > 0) atomic_dec(&scsi_target(sdev)->target_busy); out_put_budget: - scsi_mq_put_budget(hctx); + scsi_mq_put_budget(q, cmd->budget_token); + cmd->budget_token = -1; switch (ret) { case BLK_STS_OK: break; case BLK_STS_RESOURCE: - case BLK_STS_ZONE_RESOURCE: - if (atomic_read(&sdev->device_busy) || - scsi_device_blocked(sdev)) + if (scsi_device_blocked(sdev)) ret = BLK_STS_DEV_RESOURCE; break; + case BLK_STS_AGAIN: + cmd->result = DID_BUS_BUSY << 16; + if (req->rq_flags & RQF_DONTPREP) + scsi_mq_uninit_cmd(cmd); + break; default: if (unlikely(!scsi_device_online(sdev))) - scsi_req(req)->result = DID_NO_CONNECT << 16; + cmd->result = DID_NO_CONNECT << 16; else - scsi_req(req)->result = DID_ERROR << 16; + cmd->result = DID_ERROR << 16; /* * Make sure to release all allocated resources when * we hit an error, as we will never see this command @@ -1702,35 +1933,24 @@ out_put_budget: */ if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); break; } return ret; } -static enum blk_eh_timer_return scsi_timeout(struct request *req, - bool reserved) -{ - if (reserved) - return BLK_EH_RESET_TIMER; - return scsi_times_out(req); -} - static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { struct Scsi_Host *shost = set->driver_data; - const bool unchecked_isa_dma = shost->unchecked_isa_dma; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); struct scatterlist *sg; int ret = 0; - if (unchecked_isa_dma) - cmd->flags |= SCMD_UNCHECKED_ISA_DMA; - cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, - GFP_KERNEL, numa_node); + cmd->sense_buffer = + 
kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node); if (!cmd->sense_buffer) return -ENOMEM; - cmd->req.sense = cmd->sense_buffer; if (scsi_host_get_prot(shost)) { sg = (void *)cmd + sizeof(struct scsi_cmnd) + @@ -1741,8 +1961,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, if (shost->hostt->init_cmd_priv) { ret = shost->hostt->init_cmd_priv(shost, cmd); if (ret < 0) - scsi_free_sense_buffer(unchecked_isa_dma, - cmd->sense_buffer); + kmem_cache_free(scsi_sense_cache, cmd->sense_buffer); } return ret; @@ -1756,85 +1975,97 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, if (shost->hostt->exit_cmd_priv) shost->hostt->exit_cmd_priv(shost, cmd); - scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA, - cmd->sense_buffer); + kmem_cache_free(scsi_sense_cache, cmd->sense_buffer); } -static int scsi_map_queues(struct blk_mq_tag_set *set) + +static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) +{ + struct Scsi_Host *shost = hctx->driver_data; + + if (shost->hostt->mq_poll) + return shost->hostt->mq_poll(shost, hctx->queue_num); + + return 0; +} + +static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + struct Scsi_Host *shost = data; + + hctx->driver_data = shost; + return 0; +} + +static void scsi_map_queues(struct blk_mq_tag_set *set) { struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); if (shost->hostt->map_queues) return shost->hostt->map_queues(shost); - return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); + blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); } -void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) +void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) { struct device *dev = shost->dma_dev; - /* - * this limit is imposed by hardware restrictions - */ - blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, - SG_MAX_SEGMENTS)); + memset(lim, 0, sizeof(*lim)); + lim->max_segments = + min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS); if (scsi_host_prot_dma(shost)) { shost->sg_prot_tablesize = min_not_zero(shost->sg_prot_tablesize, (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); - blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); + lim->max_integrity_segments = shost->sg_prot_tablesize; } - if (dev->dma_mask) { - shost->max_sectors = min_t(unsigned int, shost->max_sectors, - dma_max_mapping_size(dev) >> SECTOR_SHIFT); - } - blk_queue_max_hw_sectors(q, shost->max_sectors); - if (shost->unchecked_isa_dma) - blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); - blk_queue_segment_boundary(q, shost->dma_boundary); - dma_set_seg_boundary(dev, shost->dma_boundary); - - blk_queue_max_segment_size(q, shost->max_segment_size); - blk_queue_virt_boundary(q, shost->virt_boundary_mask); - dma_set_max_seg_size(dev, queue_max_segment_size(q)); + lim->max_hw_sectors = shost->max_sectors; + lim->seg_boundary_mask = shost->dma_boundary; + lim->max_segment_size = shost->max_segment_size; + lim->virt_boundary_mask = shost->virt_boundary_mask; + lim->dma_alignment = max_t(unsigned int, + shost->dma_alignment, dma_get_cache_alignment() - 1); /* - * Set a reasonable default alignment: The larger of 32-byte (dword), - * which is a common minimum for HBAs, and the minimum DMA alignment, - * which is set by the platform. - * - * Devices that require a bigger alignment can increase it later. 
+ * Propagate the DMA formation properties to the dma-mapping layer as + * a courtesy service to the LLDDs. This needs to check that the buses + * actually support the DMA API first, though. */ - blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); + if (dev->dma_parms) { + dma_set_seg_boundary(dev, shost->dma_boundary); + dma_set_max_seg_size(dev, shost->max_segment_size); + } } -EXPORT_SYMBOL_GPL(__scsi_init_queue); +EXPORT_SYMBOL_GPL(scsi_init_limits); static const struct blk_mq_ops scsi_mq_ops_no_commit = { .get_budget = scsi_mq_get_budget, .put_budget = scsi_mq_put_budget, .queue_rq = scsi_queue_rq, - .complete = scsi_softirq_done, + .complete = scsi_complete, .timeout = scsi_timeout, #ifdef CONFIG_BLK_DEBUG_FS .show_rq = scsi_show_rq, #endif .init_request = scsi_mq_init_request, .exit_request = scsi_mq_exit_request, - .initialize_rq_fn = scsi_initialize_rq, .cleanup_rq = scsi_cleanup_rq, .busy = scsi_mq_lld_busy, .map_queues = scsi_map_queues, + .init_hctx = scsi_init_hctx, + .poll = scsi_mq_poll, + .set_rq_budget_token = scsi_mq_set_rq_budget_token, + .get_rq_budget_token = scsi_mq_get_rq_budget_token, }; static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) { - struct request_queue *q = hctx->queue; - struct scsi_device *sdev = q->queuedata; - struct Scsi_Host *shost = sdev->host; + struct Scsi_Host *shost = hctx->driver_data; shost->hostt->commit_rqs(shost, hctx->queue_num); } @@ -1844,31 +2075,22 @@ static const struct blk_mq_ops scsi_mq_ops = { .put_budget = scsi_mq_put_budget, .queue_rq = scsi_queue_rq, .commit_rqs = scsi_commit_rqs, - .complete = scsi_softirq_done, + .complete = scsi_complete, .timeout = scsi_timeout, #ifdef CONFIG_BLK_DEBUG_FS .show_rq = scsi_show_rq, #endif .init_request = scsi_mq_init_request, .exit_request = scsi_mq_exit_request, - .initialize_rq_fn = scsi_initialize_rq, .cleanup_rq = scsi_cleanup_rq, .busy = scsi_mq_lld_busy, .map_queues = scsi_map_queues, + .init_hctx = scsi_init_hctx, + .poll = scsi_mq_poll, + .set_rq_budget_token = scsi_mq_set_rq_budget_token, + .get_rq_budget_token = scsi_mq_get_rq_budget_token, }; -struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) -{ - sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); - if (IS_ERR(sdev->request_queue)) - return NULL; - - sdev->request_queue->queuedata = sdev; - __scsi_init_queue(sdev->host, sdev->request_queue); - blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue); - return sdev->request_queue; -} - int scsi_mq_setup_tags(struct Scsi_Host *shost) { unsigned int cmd_size, sgl_size; @@ -1887,21 +2109,68 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) else tag_set->ops = &scsi_mq_ops_no_commit; tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; - tag_set->queue_depth = shost->can_queue; + tag_set->nr_maps = shost->nr_maps ? 
: 1; + tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds; + tag_set->reserved_tags = shost->nr_reserved_cmds; tag_set->cmd_size = cmd_size; - tag_set->numa_node = NUMA_NO_NODE; - tag_set->flags = BLK_MQ_F_SHOULD_MERGE; - tag_set->flags |= - BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); + tag_set->numa_node = dev_to_node(shost->dma_dev); + if (shost->hostt->tag_alloc_policy_rr) + tag_set->flags |= BLK_MQ_F_TAG_RR; + if (shost->queuecommand_may_block) + tag_set->flags |= BLK_MQ_F_BLOCKING; tag_set->driver_data = shost; + if (shost->host_tagset) + tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; return blk_mq_alloc_tag_set(tag_set); } -void scsi_mq_destroy_tags(struct Scsi_Host *shost) +void scsi_mq_free_tags(struct kref *kref) { + struct Scsi_Host *shost = container_of(kref, typeof(*shost), + tagset_refcnt); + blk_mq_free_tag_set(&shost->tag_set); + complete(&shost->tagset_freed); +} + +/** + * scsi_get_internal_cmd() - Allocate an internal SCSI command. + * @sdev: SCSI device from which to allocate the command + * @data_direction: Data direction for the allocated command + * @flags: request allocation flags, e.g. BLK_MQ_REQ_RESERVED or + * BLK_MQ_REQ_NOWAIT. + * + * Allocates a SCSI command for internal LLDD use. + */ +struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev, + enum dma_data_direction data_direction, + blk_mq_req_flags_t flags) +{ + enum req_op op = data_direction == DMA_TO_DEVICE ? REQ_OP_DRV_OUT : + REQ_OP_DRV_IN; + struct scsi_cmnd *scmd; + struct request *rq; + + rq = scsi_alloc_request(sdev->request_queue, op, flags); + if (IS_ERR(rq)) + return NULL; + scmd = blk_mq_rq_to_pdu(rq); + scmd->device = sdev; + + return scmd; +} +EXPORT_SYMBOL_GPL(scsi_get_internal_cmd); + +/** + * scsi_put_internal_cmd() - Free an internal SCSI command. + * @scmd: SCSI command to be freed + */ +void scsi_put_internal_cmd(struct scsi_cmnd *scmd) +{ + blk_mq_free_request(blk_mq_rq_from_pdu(scmd)); } +EXPORT_SYMBOL_GPL(scsi_put_internal_cmd); /** * scsi_device_from_queue - return sdev associated with a request_queue @@ -1922,7 +2191,14 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q) return sdev; } +/* + * pktcdvd should have been integrated into the SCSI layers, but for historical + * reasons like the old IDE driver it isn't. This export allows it to safely + * probe if a given device is a SCSI one and only attach to that. + */ +#ifdef CONFIG_CDROM_PKTCDVD_MODULE EXPORT_SYMBOL_GPL(scsi_device_from_queue); +#endif /** * scsi_block_requests - Utility function used by low-level drivers to prevent @@ -1955,24 +2231,9 @@ void scsi_unblock_requests(struct Scsi_Host *shost) } EXPORT_SYMBOL(scsi_unblock_requests); -int __init scsi_init_queue(void) -{ - scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", - sizeof(struct scsi_data_buffer), - 0, 0, NULL); - if (!scsi_sdb_cache) { - printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); - return -ENOMEM; - } - - return 0; -} - void scsi_exit_queue(void) { kmem_cache_destroy(scsi_sense_cache); - kmem_cache_destroy(scsi_sense_isadma_cache); - kmem_cache_destroy(scsi_sdb_cache); } /** @@ -1980,7 +2241,6 @@ void scsi_exit_queue(void) * @sdev: SCSI device to be queried * @pf: Page format bit (1 == standard, 0 == vendor specific) * @sp: Save page bit (0 == don't save, 1 == save) - * @modepage: mode page being requested * @buffer: request buffer (may not be smaller than eight bytes) * @len: length of request buffer. 
* @timeout: command timeout @@ -1993,20 +2253,29 @@ void scsi_exit_queue(void) * status on error * */ -int -scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, - unsigned char *buffer, int len, int timeout, int retries, - struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) +int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, + unsigned char *buffer, int len, int timeout, int retries, + struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { unsigned char cmd[10]; unsigned char *real_buffer; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; int ret; memset(cmd, 0, sizeof(cmd)); cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); - if (sdev->use_10_for_ms) { - if (len > 65535) + /* + * Use MODE SELECT(10) if the device asked for it or if the mode page + * and the mode select header cannot fit within the maximumm 255 bytes + * of the MODE SELECT(6) command. + */ + if (sdev->use_10_for_ms || + len + 4 > 255 || + data->block_descriptor_length > 255) { + if (len > 65535 - 8) return -EINVAL; real_buffer = kmalloc(8 + len, GFP_KERNEL); if (!real_buffer) @@ -2019,15 +2288,13 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, real_buffer[3] = data->device_specific; real_buffer[4] = data->longlba ? 0x01 : 0; real_buffer[5] = 0; - real_buffer[6] = data->block_descriptor_length >> 8; - real_buffer[7] = data->block_descriptor_length; + put_unaligned_be16(data->block_descriptor_length, + &real_buffer[6]); cmd[0] = MODE_SELECT_10; - cmd[7] = len >> 8; - cmd[8] = len; + put_unaligned_be16(len, &cmd[7]); } else { - if (len > 255 || data->block_descriptor_length > 255 || - data->longlba) + if (data->longlba) return -EINVAL; real_buffer = kmalloc(4 + len, GFP_KERNEL); @@ -2039,14 +2306,13 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, real_buffer[1] = data->medium_type; real_buffer[2] = data->device_specific; real_buffer[3] = data->block_descriptor_length; - cmd[0] = MODE_SELECT; cmd[4] = len; } - ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, - sshdr, timeout, retries, NULL); + ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len, + timeout, retries, &exec_args); kfree(real_buffer); return ret; } @@ -2055,8 +2321,9 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); /** * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. * @sdev: SCSI device to be queried - * @dbd: set if mode sense will allow block descriptors to be returned + * @dbd: set to prevent mode sense from returning block descriptors * @modepage: mode page being requested + * @subpage: sub-page of the mode page being requested * @buffer: request buffer (may not be smaller than eight bytes) * @len: length of request buffer. * @timeout: command timeout @@ -2065,20 +2332,36 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); * @sshdr: place to put sense data (or NULL if no sense to be collected). * must be SCSI_SENSE_BUFFERSIZE big. * - * Returns zero if unsuccessful, or the header offset (either 4 - * or 8 depending on whether a six or ten byte command was - * issued) if successful. 
+ * Returns zero if successful, or a negative error number on failure */ int -scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, +scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { unsigned char cmd[12]; int use_10_for_ms; int header_length; - int result, retry_count = retries; + int result; struct scsi_sense_hdr my_sshdr; + struct scsi_failure failure_defs[] = { + { + .sense = UNIT_ATTENTION, + .asc = SCMD_FAILURE_ASC_ANY, + .ascq = SCMD_FAILURE_ASCQ_ANY, + .allowed = retries, + .result = SAM_STAT_CHECK_CONDITION, + }, + {} + }; + struct scsi_failures failures = { + .failure_definitions = failure_defs, + }; + const struct scsi_exec_args exec_args = { + /* caller might not be interested in sense, but we need it */ + .sshdr = sshdr ? : &my_sshdr, + .failures = &failures, + }; memset(data, 0, sizeof(*data)); memset(&cmd[0], 0, 12); @@ -2086,24 +2369,23 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, dbd = sdev->set_dbd_for_ms ? 8 : dbd; cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ cmd[2] = modepage; + cmd[3] = subpage; - /* caller might not be interested in sense, but we need it */ - if (!sshdr) - sshdr = &my_sshdr; + sshdr = exec_args.sshdr; retry: - use_10_for_ms = sdev->use_10_for_ms; + use_10_for_ms = sdev->use_10_for_ms || len > 255; if (use_10_for_ms) { - if (len < 8) - len = 8; + if (len < 8 || len > 65535) + return -EINVAL; cmd[0] = MODE_SENSE_10; - cmd[8] = len; + put_unaligned_be16(len, &cmd[7]); header_length = 8; } else { if (len < 4) - len = 4; + return -EINVAL; cmd[0] = MODE_SENSE; cmd[4] = len; @@ -2112,60 +2394,61 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, memset(buffer, 0, len); - result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, - sshdr, timeout, retries, NULL); + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, + timeout, retries, &exec_args); + if (result < 0) + return result; /* This code looks awful: what it's doing is making sure an * ILLEGAL REQUEST sense return identifies the actual command * byte as the problem. MODE_SENSE commands can return * ILLEGAL REQUEST if the code page isn't supported */ - if (use_10_for_ms && !scsi_status_is_good(result) && - driver_byte(result) == DRIVER_SENSE) { + if (!scsi_status_is_good(result)) { if (scsi_sense_valid(sshdr)) { if ((sshdr->sense_key == ILLEGAL_REQUEST) && (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { - /* - * Invalid command operation code + /* + * Invalid command operation code: retry using + * MODE SENSE(6) if this was a MODE SENSE(10) + * request, except if the request mode page is + * too large for MODE SENSE single byte + * allocation length field. */ - sdev->use_10_for_ms = 0; - goto retry; + if (use_10_for_ms) { + if (len > 255) + return -EIO; + sdev->use_10_for_ms = 0; + goto retry; + } } } + return -EIO; + } + if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && + (modepage == 6 || modepage == 8))) { + /* Initio breakage? 
*/ + header_length = 0; + data->length = 13; + data->medium_type = 0; + data->device_specific = 0; + data->longlba = 0; + data->block_descriptor_length = 0; + } else if (use_10_for_ms) { + data->length = get_unaligned_be16(&buffer[0]) + 2; + data->medium_type = buffer[2]; + data->device_specific = buffer[3]; + data->longlba = buffer[4] & 0x01; + data->block_descriptor_length = get_unaligned_be16(&buffer[6]); + } else { + data->length = buffer[0] + 1; + data->medium_type = buffer[1]; + data->device_specific = buffer[2]; + data->block_descriptor_length = buffer[3]; } + data->header_length = header_length; - if(scsi_status_is_good(result)) { - if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && - (modepage == 6 || modepage == 8))) { - /* Initio breakage? */ - header_length = 0; - data->length = 13; - data->medium_type = 0; - data->device_specific = 0; - data->longlba = 0; - data->block_descriptor_length = 0; - } else if(use_10_for_ms) { - data->length = buffer[0]*256 + buffer[1] + 2; - data->medium_type = buffer[2]; - data->device_specific = buffer[3]; - data->longlba = buffer[4] & 0x01; - data->block_descriptor_length = buffer[6]*256 - + buffer[7]; - } else { - data->length = buffer[0] + 1; - data->medium_type = buffer[1]; - data->device_specific = buffer[2]; - data->block_descriptor_length = buffer[3]; - } - data->header_length = header_length; - } else if ((status_byte(result) == CHECK_CONDITION) && - scsi_sense_valid(sshdr) && - sshdr->sense_key == UNIT_ATTENTION && retry_count) { - retry_count--; - goto retry; - } - - return result; + return 0; } EXPORT_SYMBOL(scsi_mode_sense); @@ -2186,16 +2469,19 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0, }; + const struct scsi_exec_args exec_args = { + .sshdr = sshdr, + }; int result; /* try to eat the UNIT_ATTENTION if there are enough retries */ do { - result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, - timeout, 1, NULL); - if (sdev->removable && scsi_sense_valid(sshdr) && + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, + timeout, 1, &exec_args); + if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) && sshdr->sense_key == UNIT_ATTENTION) sdev->changed = 1; - } while (scsi_sense_valid(sshdr) && + } while (result > 0 && scsi_sense_valid(sshdr) && sshdr->sense_key == UNIT_ATTENTION && --retries); return result; @@ -2227,7 +2513,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) goto illegal; } break; - + case SDEV_RUNNING: switch (oldstate) { case SDEV_CREATED: @@ -2331,7 +2617,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) EXPORT_SYMBOL(scsi_device_set_state); /** - * sdev_evt_emit - emit a single SCSI device uevent + * scsi_evt_emit - emit a single SCSI device uevent * @sdev: associated SCSI device * @evt: event to emit * @@ -2347,7 +2633,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) envp[idx++] = "SDEV_MEDIA_CHANGE=1"; break; case SDEV_EVT_INQUIRY_CHANGE_REPORTED: - scsi_rescan_device(&sdev->sdev_gendev); + scsi_rescan_device(sdev); envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; break; case SDEV_EVT_CAPACITY_CHANGE_REPORTED: @@ -2379,7 +2665,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) } /** - * sdev_evt_thread - send a uevent for each scsi event + * scsi_evt_thread - send a uevent for each scsi event * @work: work struct for scsi_device * * Dispatch queued events to their associated 
scsi_device kobjects @@ -2505,15 +2791,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev, EXPORT_SYMBOL_GPL(sdev_evt_send_simple); /** - * scsi_device_quiesce - Block user issued commands. + * scsi_device_quiesce - Block all commands except power management. * @sdev: scsi device to quiesce. * * This works by trying to transition to the SDEV_QUIESCE state * (which must be a legal transition). When the device is in this - * state, only special requests will be accepted, all others will - * be deferred. Since special requests may also be requeued requests, - * a successful return doesn't guarantee the device will be - * totally quiescent. + * state, only power management requests will be accepted, all others will + * be deferred. * * Must be called with user context, may sleep. * @@ -2523,6 +2807,7 @@ int scsi_device_quiesce(struct scsi_device *sdev) { struct request_queue *q = sdev->request_queue; + unsigned int memflags; int err; /* @@ -2537,7 +2822,7 @@ scsi_device_quiesce(struct scsi_device *sdev) blk_set_pm_only(q); - blk_mq_freeze_queue(q); + memflags = blk_mq_freeze_queue(q); /* * Ensure that the effect of blk_set_pm_only() will be visible * for percpu_ref_tryget() callers that occur after the queue @@ -2545,7 +2830,7 @@ scsi_device_quiesce(struct scsi_device *sdev) * was called. See also https://lwn.net/Articles/573497/. */ synchronize_rcu(); - blk_mq_unfreeze_queue(q); + blk_mq_unfreeze_queue(q, memflags); mutex_lock(&sdev->state_mutex); err = scsi_device_set_state(sdev, SDEV_QUIESCE); @@ -2575,12 +2860,12 @@ void scsi_device_resume(struct scsi_device *sdev) * device deleted during suspend) */ mutex_lock(&sdev->state_mutex); + if (sdev->sdev_state == SDEV_QUIESCE) + scsi_device_set_state(sdev, SDEV_RUNNING); if (sdev->quiesced_by) { sdev->quiesced_by = NULL; blk_clear_pm_only(sdev->request_queue); } - if (sdev->sdev_state == SDEV_QUIESCE) - scsi_device_set_state(sdev, SDEV_RUNNING); mutex_unlock(&sdev->state_mutex); } EXPORT_SYMBOL(scsi_device_resume); @@ -2611,6 +2896,32 @@ scsi_target_resume(struct scsi_target *starget) } EXPORT_SYMBOL(scsi_target_resume); +static int __scsi_internal_device_block_nowait(struct scsi_device *sdev) +{ + if (scsi_device_set_state(sdev, SDEV_BLOCK)) + return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); + + return 0; +} + +void scsi_start_queue(struct scsi_device *sdev) +{ + if (cmpxchg(&sdev->queue_stopped, 1, 0)) + blk_mq_unquiesce_queue(sdev->request_queue); +} + +static void scsi_stop_queue(struct scsi_device *sdev) +{ + /* + * The atomic variable of ->queue_stopped covers that + * blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue. + * + * The caller needs to wait until quiesce is done. + */ + if (!cmpxchg(&sdev->queue_stopped, 0, 1)) + blk_mq_quiesce_queue_nowait(sdev->request_queue); +} + /** * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state * @sdev: device to block @@ -2627,35 +2938,27 @@ EXPORT_SYMBOL(scsi_target_resume); */ int scsi_internal_device_block_nowait(struct scsi_device *sdev) { - struct request_queue *q = sdev->request_queue; - int err = 0; - - err = scsi_device_set_state(sdev, SDEV_BLOCK); - if (err) { - err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); - - if (err) - return err; - } + int ret = __scsi_internal_device_block_nowait(sdev); - /* + /* * The device has transitioned to SDEV_BLOCK. Stop the * block layer from calling the midlayer with this device's - * request queue. + * request queue. 
*/ - blk_mq_quiesce_queue_nowait(q); - return 0; + if (!ret) + scsi_stop_queue(sdev); + return ret; } EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); /** - * scsi_internal_device_block - try to transition to the SDEV_BLOCK state + * scsi_device_block - try to transition to the SDEV_BLOCK state * @sdev: device to block + * @data: dummy argument, ignored * - * Pause SCSI command processing on the specified device and wait until all - * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep. - * - * Returns zero if successful or a negative error code upon failure. + * Pause SCSI command processing on the specified device. Callers must wait + * until all ongoing scsi_queue_rq() calls have finished after this function + * returns. * * Note: * This routine transitions the device to the SDEV_BLOCK state (which must be @@ -2663,25 +2966,26 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); * is paused until the device leaves the SDEV_BLOCK state. See also * scsi_internal_device_unblock(). */ -static int scsi_internal_device_block(struct scsi_device *sdev) +static void scsi_device_block(struct scsi_device *sdev, void *data) { - struct request_queue *q = sdev->request_queue; int err; + enum scsi_device_state state; mutex_lock(&sdev->state_mutex); - err = scsi_internal_device_block_nowait(sdev); + err = __scsi_internal_device_block_nowait(sdev); + state = sdev->sdev_state; if (err == 0) - blk_mq_quiesce_queue(q); - mutex_unlock(&sdev->state_mutex); + /* + * scsi_stop_queue() must be called with the state_mutex + * held. Otherwise a simultaneous scsi_start_queue() call + * might unquiesce the queue before we quiesce it. + */ + scsi_stop_queue(sdev); - return err; -} - -void scsi_start_queue(struct scsi_device *sdev) -{ - struct request_queue *q = sdev->request_queue; + mutex_unlock(&sdev->state_mutex); - blk_mq_unquiesce_queue(q); + WARN_ONCE(err, "%s: failed to block %s in state %d\n", + __func__, dev_name(&sdev->sdev_gendev), state); } /** @@ -2764,36 +3068,35 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev, return ret; } -static void -device_block(struct scsi_device *sdev, void *data) -{ - int ret; - - ret = scsi_internal_device_block(sdev); - - WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n", - dev_name(&sdev->sdev_gendev), ret); -} - static int target_block(struct device *dev, void *data) { if (scsi_is_target_device(dev)) starget_for_each_device(to_scsi_target(dev), NULL, - device_block); + scsi_device_block); return 0; } +/** + * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state + * @dev: a parent device of one or more scsi_target devices + * @shost: the Scsi_Host to which this device belongs + * + * Iterate over all children of @dev, which should be scsi_target devices, + * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for + * ongoing scsi_queue_rq() calls to finish. May sleep. + * + * Note: + * @dev must not itself be a scsi_target device. 
+ */ void -scsi_target_block(struct device *dev) +scsi_block_targets(struct Scsi_Host *shost, struct device *dev) { - if (scsi_is_target_device(dev)) - starget_for_each_device(to_scsi_target(dev), NULL, - device_block); - else - device_for_each_child(dev, NULL, target_block); + WARN_ON_ONCE(scsi_is_target_device(dev)); + device_for_each_child(dev, NULL, target_block); + blk_mq_wait_quiesce_done(&shost->tag_set); } -EXPORT_SYMBOL_GPL(scsi_target_block); +EXPORT_SYMBOL_GPL(scsi_block_targets); static void device_unblock(struct scsi_device *sdev, void *data) @@ -2821,11 +3124,20 @@ scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) } EXPORT_SYMBOL_GPL(scsi_target_unblock); +/** + * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state + * @shost: device to block + * + * Pause SCSI command processing for all logical units associated with the SCSI + * host and wait until pending scsi_queue_rq() calls have finished. + * + * Returns zero if successful or a negative error code upon failure. + */ int scsi_host_block(struct Scsi_Host *shost) { struct scsi_device *sdev; - int ret = 0; + int ret; /* * Call scsi_internal_device_block_nowait so we can avoid @@ -2835,20 +3147,16 @@ scsi_host_block(struct Scsi_Host *shost) mutex_lock(&sdev->state_mutex); ret = scsi_internal_device_block_nowait(sdev); mutex_unlock(&sdev->state_mutex); - if (ret) - break; + if (ret) { + scsi_device_put(sdev); + return ret; + } } - /* - * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so - * calling synchronize_rcu() once is enough. - */ - WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING); + /* Wait for ongoing scsi_queue_rq() calls to finish. */ + blk_mq_wait_quiesce_done(&shost->tag_set); - if (!ret) - synchronize_rcu(); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(scsi_host_block); @@ -2906,8 +3214,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, /* Offset starting from the beginning of first page in this sg-entry */ *offset = *offset - len_complete + sg->offset; - /* Assumption: contiguous pages can be accessed as "page + i" */ - page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); + page = sg_page(sg) + (*offset >> PAGE_SHIFT); *offset &= ~PAGE_MASK; /* Bytes in this sg-entry from *offset to the end of the page */ @@ -2943,6 +3250,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev) } EXPORT_SYMBOL(sdev_enable_disk_events); +static unsigned char designator_prio(const unsigned char *d) +{ + if (d[1] & 0x30) + /* not associated with LUN */ + return 0; + + if (d[3] == 0) + /* invalid length */ + return 0; + + /* + * Order of preference for lun descriptor: + * - SCSI name string + * - NAA IEEE Registered Extended + * - EUI-64 based 16-byte + * - EUI-64 based 12-byte + * - NAA IEEE Registered + * - NAA IEEE Extended + * - EUI-64 based 8-byte + * - SCSI name string (truncated) + * - T10 Vendor ID + * as longer descriptors reduce the likelyhood + * of identification clashes. 
+ */ + + switch (d[1] & 0xf) { + case 8: + /* SCSI name string, variable-length UTF-8 */ + return 9; + case 3: + switch (d[4] >> 4) { + case 6: + /* NAA registered extended */ + return 8; + case 5: + /* NAA registered */ + return 5; + case 4: + /* NAA extended */ + return 4; + case 3: + /* NAA locally assigned */ + return 1; + default: + break; + } + break; + case 2: + switch (d[3]) { + case 16: + /* EUI64-based, 16 byte */ + return 7; + case 12: + /* EUI64-based, 12 byte */ + return 6; + case 8: + /* EUI64-based, 8 byte */ + return 3; + default: + break; + } + break; + case 1: + /* T10 vendor ID */ + return 1; + default: + break; + } + + return 0; +} + /** * scsi_vpd_lun_id - return a unique device identification * @sdev: SCSI device @@ -2959,7 +3338,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events); */ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) { - u8 cur_id_type = 0xff; + u8 cur_id_prio = 0; u8 cur_id_size = 0; const unsigned char *d, *cur_id_str; const struct scsi_vpd *vpd_pg83; @@ -2972,20 +3351,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) return -ENXIO; } - /* - * Look for the correct descriptor. - * Order of preference for lun descriptor: - * - SCSI name string - * - NAA IEEE Registered Extended - * - EUI-64 based 16-byte - * - EUI-64 based 12-byte - * - NAA IEEE Registered - * - NAA IEEE Extended - * - T10 Vendor ID - * as longer descriptors reduce the likelyhood - * of identification clashes. - */ - /* The id string must be at least 20 bytes + terminating NULL byte */ if (id_len < 21) { rcu_read_unlock(); @@ -2993,39 +3358,32 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) } memset(id, 0, id_len); - d = vpd_pg83->data + 4; - while (d < vpd_pg83->data + vpd_pg83->len) { - /* Skip designators not referring to the LUN */ - if ((d[1] & 0x30) != 0x00) - goto next_desig; + for (d = vpd_pg83->data + 4; + d < vpd_pg83->data + vpd_pg83->len; + d += d[3] + 4) { + u8 prio = designator_prio(d); + + if (prio == 0 || cur_id_prio > prio) + continue; switch (d[1] & 0xf) { case 0x1: /* T10 Vendor ID */ if (cur_id_size > d[3]) break; - /* Prefer anything */ - if (cur_id_type > 0x01 && cur_id_type != 0xff) - break; + cur_id_prio = prio; cur_id_size = d[3]; if (cur_id_size + 4 > id_len) cur_id_size = id_len - 4; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; id_size = snprintf(id, id_len, "t10.%*pE", cur_id_size, cur_id_str); break; case 0x2: /* EUI-64 */ - if (cur_id_size > d[3]) - break; - /* Prefer NAA IEEE Registered Extended */ - if (cur_id_type == 0x3 && - cur_id_size == d[3]) - break; + cur_id_prio = prio; cur_id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; switch (cur_id_size) { case 8: id_size = snprintf(id, id_len, @@ -3043,17 +3401,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) cur_id_str); break; default: - cur_id_size = 0; break; } break; case 0x3: /* NAA */ - if (cur_id_size > d[3]) - break; + cur_id_prio = prio; cur_id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; switch (cur_id_size) { case 8: id_size = snprintf(id, id_len, @@ -3066,32 +3421,29 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) cur_id_str); break; default: - cur_id_size = 0; break; } break; case 0x8: /* SCSI name string */ - if (cur_id_size + 4 > d[3]) + if (cur_id_size > d[3]) break; /* Prefer others for truncated descriptor */ - if (cur_id_size && d[3] > id_len) - break; + if (d[3] > id_len) { + prio = 2; + if (cur_id_prio > prio) + break; + } + 
cur_id_prio = prio; cur_id_size = id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; if (cur_id_size >= id_len) cur_id_size = id_len - 1; memcpy(id, cur_id_str, cur_id_size); - /* Decrease priority for truncated descriptor */ - if (cur_id_size != id_size) - cur_id_size = 6; break; default: break; } -next_desig: - d += d[3] + 4; } rcu_read_unlock(); @@ -3099,14 +3451,16 @@ next_desig: } EXPORT_SYMBOL(scsi_vpd_lun_id); -/* +/** * scsi_vpd_tpg_id - return a target port group identifier * @sdev: SCSI device + * @rel_id: pointer to return relative target port in if not %NULL * * Returns the Target Port Group identifier from the information - * froom VPD page 0x83 of the device. + * from VPD page 0x83 of the device. + * Optionally sets @rel_id to the relative target port on success. * - * Returns the identifier or error on failure. + * Return: the identifier or error on failure. */ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) { @@ -3145,3 +3499,24 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) return group_id; } EXPORT_SYMBOL(scsi_vpd_tpg_id); + +/** + * scsi_build_sense - build sense data for a command + * @scmd: scsi command for which the sense should be formatted + * @desc: Sense format (non-zero == descriptor format, + * 0 == fixed format) + * @key: Sense key + * @asc: Additional sense code + * @ascq: Additional sense code qualifier + * + **/ +void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq) +{ + scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq); + scmd->result = SAM_STAT_CHECK_CONDITION; +} +EXPORT_SYMBOL_GPL(scsi_build_sense); + +#ifdef CONFIG_SCSI_LIB_KUNIT_TEST +#include "scsi_lib_test.c" +#endif |
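
The scsi_get_internal_cmd()/scsi_put_internal_cmd() pair added above is intended for LLDD-internal commands drawn from the reserved tags accounted for in scsi_mq_setup_tags(). A minimal usage sketch, assuming the usual <scsi/scsi_cmnd.h> and <scsi/scsi_device.h> includes; the function name and the hardware hand-off step are hypothetical, not part of this patch:

/* Illustrative only: issue a driver-internal housekeeping command. */
static int example_lld_send_internal(struct scsi_device *sdev)
{
        struct scsi_cmnd *scmd;

        /* Draw from the reserved tags so normal I/O cannot starve this. */
        scmd = scsi_get_internal_cmd(sdev, DMA_FROM_DEVICE,
                                     BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
        if (!scmd)
                return -EBUSY;

        /* ... fill in scmd->cmnd/cmd_len and hand the command to the HBA ... */

        scsi_put_internal_cmd(scmd);
        return 0;
}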
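
scsi_mode_sense() now takes the sub-page explicitly and returns 0 or a negative errno instead of the old header-offset convention, with UNIT ATTENTION retries handled through the scsi_failures table. A hedged caller sketch; the function name, buffer size, page number and timeout are examples only:

/* Illustrative only: fetch the Caching mode page (0x08), sub-page 0. */
static int example_read_caching_page(struct scsi_device *sdev)
{
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        unsigned char buf[64];
        int ret;

        ret = scsi_mode_sense(sdev, 0 /* dbd: allow block descriptors */,
                              0x08, 0x00 /* sub-page */, buf, sizeof(buf),
                              30 * HZ, 3, &data, &sshdr);
        if (ret < 0)
                return ret;     /* negative errno, not a SCSI result value */

        /* The page itself follows the header and any block descriptors. */
        return buf[data.header_length + data.block_descriptor_length] & 0x3f;
}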
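
scsi_block_targets() replaces the old scsi_target_block() and also takes the Scsi_Host, so it can wait for quiescing on the shared tag set after blocking all child devices. A sketch of how a transport class might use it while a remote port is unreachable; the callback names and the rport device argument are hypothetical:

/* Illustrative only: block all LUNs below a remote port that went away. */
static void example_remote_port_gone(struct Scsi_Host *shost,
                                     struct device *rport_dev)
{
        /*
         * Transition every scsi_device below rport_dev to SDEV_BLOCK and
         * wait until in-flight scsi_queue_rq() calls have drained.
         */
        scsi_block_targets(shost, rport_dev);

        /* ... fail or requeue outstanding commands for the port ... */
}

/* Illustrative only: resume command processing once the port returns. */
static void example_remote_port_back(struct device *rport_dev)
{
        scsi_target_unblock(rport_dev, SDEV_RUNNING);
}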
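
scsi_build_sense() pairs scsi_build_sense_buffer() with setting SAM_STAT_CHECK_CONDITION on the command. A typical LLDD error path could use it as below; the surrounding function is hypothetical:

/* Illustrative only: fail a command with ILLEGAL REQUEST / invalid opcode. */
static void example_reject_opcode(struct scsi_cmnd *scmd)
{
        /* 0 selects fixed-format sense; asc 0x20 = invalid operation code. */
        scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x00);
        /* scmd->result now carries SAM_STAT_CHECK_CONDITION. */
}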
