Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 263 |
1 file changed, 191 insertions(+), 72 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index df5ac03d5d6c..144c72f0737a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -23,7 +23,7 @@
 #include <linux/blk-mq.h>
 #include <linux/blk-integrity.h>
 #include <linux/ratelimit.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -32,7 +32,7 @@
 #include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
-#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
+#include <scsi/scsi_transport.h> /* scsi_init_limits() */
 #include <scsi/scsi_dh.h>
 
 #include <trace/events/scsi.h>
@@ -185,6 +185,99 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 }
 
 /**
+ * scsi_failures_reset_retries - reset all failures to zero
+ * @failures: &struct scsi_failures with specific failure modes set
+ */
+void scsi_failures_reset_retries(struct scsi_failures *failures)
+{
+	struct scsi_failure *failure;
+
+	failures->total_retries = 0;
+
+	for (failure = failures->failure_definitions; failure->result;
+	     failure++)
+		failure->retries = 0;
+}
+EXPORT_SYMBOL_GPL(scsi_failures_reset_retries);
+
+/**
+ * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry.
+ * @scmd: scsi_cmnd to check.
+ * @failures: scsi_failures struct that lists failures to check for.
+ *
+ * Returns -EAGAIN if the caller should retry else 0.
+ */
+static int scsi_check_passthrough(struct scsi_cmnd *scmd,
+				  struct scsi_failures *failures)
+{
+	struct scsi_failure *failure;
+	struct scsi_sense_hdr sshdr;
+	enum sam_status status;
+
+	if (!scmd->result)
+		return 0;
+
+	if (!failures)
+		return 0;
+
+	for (failure = failures->failure_definitions; failure->result;
+	     failure++) {
+		if (failure->result == SCMD_FAILURE_RESULT_ANY)
+			goto maybe_retry;
+
+		if (host_byte(scmd->result) &&
+		    host_byte(scmd->result) == host_byte(failure->result))
+			goto maybe_retry;
+
+		status = status_byte(scmd->result);
+		if (!status)
+			continue;
+
+		if (failure->result == SCMD_FAILURE_STAT_ANY &&
+		    !scsi_status_is_good(scmd->result))
+			goto maybe_retry;
+
+		if (status != status_byte(failure->result))
+			continue;
+
+		if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION ||
+		    failure->sense == SCMD_FAILURE_SENSE_ANY)
+			goto maybe_retry;
+
+		if (!scsi_command_normalize_sense(scmd, &sshdr))
+			return 0;
+
+		if (failure->sense != sshdr.sense_key)
+			continue;
+
+		if (failure->asc == SCMD_FAILURE_ASC_ANY)
+			goto maybe_retry;
+
+		if (failure->asc != sshdr.asc)
+			continue;
+
+		if (failure->ascq == SCMD_FAILURE_ASCQ_ANY ||
+		    failure->ascq == sshdr.ascq)
+			goto maybe_retry;
+	}
+
+	return 0;
+
+maybe_retry:
+	if (failure->allowed) {
+		if (failure->allowed == SCMD_FAILURE_NO_LIMIT ||
+		    ++failure->retries <= failure->allowed)
+			return -EAGAIN;
+	} else {
+		if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT ||
+		    ++failures->total_retries <= failures->total_allowed)
+			return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/**
  * scsi_execute_cmd - insert request and wait for the result
  * @sdev: scsi_device
  * @cmd: scsi command
@@ -192,7 +285,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  * @buffer: data buffer
  * @bufflen: len of buffer
  * @timeout: request timeout in HZ
- * @retries: number of times to retry request
+ * @ml_retries: number of times SCSI midlayer will retry request
  * @args: Optional args. See struct definition for field descriptions
  *
  * Returns the scsi_cmnd result field if a command was executed, or a negative
@@ -200,7 +293,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  */
 int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
 		     blk_opf_t opf, void *buffer, unsigned int bufflen,
-		     int timeout, int retries,
+		     int timeout, int ml_retries,
 		     const struct scsi_exec_args *args)
 {
 	static const struct scsi_exec_args default_args;
@@ -214,20 +307,20 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
 			 args->sense_len != SCSI_SENSE_BUFFERSIZE))
 		return -EINVAL;
 
+retry:
 	req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	if (bufflen) {
-		ret = blk_rq_map_kern(sdev->request_queue, req,
-				      buffer, bufflen, GFP_NOIO);
+		ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO);
 		if (ret)
 			goto out;
 	}
 	scmd = blk_mq_rq_to_pdu(req);
 	scmd->cmd_len = COMMAND_SIZE(cmd[0]);
 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
-	scmd->allowed = retries;
+	scmd->allowed = ml_retries;
 	scmd->flags |= args->scmd_flags;
 	req->timeout = timeout;
 	req->rq_flags |= RQF_QUIET;
@@ -237,6 +330,11 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
 	 */
 	blk_execute_rq(req, true);
 
+	if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) {
+		blk_mq_free_request(req);
+		goto retry;
+	}
+
 	/*
 	 * Some devices (USB mass-storage in particular) may transfer
 	 * garbage data together with a residue indicating that the data
@@ -539,14 +637,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
 	if (blk_update_request(req, error, bytes))
 		return true;
 
-	// XXX:
-	if (blk_queue_add_random(q))
+	if (q->limits.features & BLK_FEAT_ADD_RANDOM)
 		add_disk_randomness(req->q->disk);
 
-	if (!blk_rq_is_passthrough(req)) {
-		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
-		cmd->flags &= ~SCMD_INITIALIZED;
-	}
+	WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
+		     !(cmd->flags & SCMD_INITIALIZED));
+	cmd->flags = 0;
 
 	/*
 	 * Calling rcu_barrier() is not necessary here because the
@@ -775,13 +871,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 			case 0x1a: /* start stop unit in progress */
 			case 0x1b: /* sanitize in progress */
 			case 0x1d: /* configuration in progress */
-			case 0x24: /* depopulation in progress */
-			case 0x25: /* depopulation restore in progress */
 				action = ACTION_DELAYED_RETRY;
 				break;
 			case 0x0a: /* ALUA state transition */
 				action = ACTION_DELAYED_REPREP;
 				break;
+			/*
+			 * Depopulation might take many hours,
+			 * thus it is not worthwhile to retry.
+			 */
+			case 0x24: /* depopulation in progress */
+			case 0x25: /* depopulation restore in progress */
+				fallthrough;
 			default:
 				action = ACTION_FAIL;
 				break;
@@ -1047,11 +1148,11 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
-	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+	count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
 
-	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+	if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
 		unsigned int pad_len =
-			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+			(rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
 		last_sg->length += pad_len;
 		cmd->extra_len += pad_len;
@@ -1073,7 +1174,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 
 	if (blk_integrity_rq(rq)) {
 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
-		int ivecs;
 
 		if (WARN_ON_ONCE(!prot_sdb)) {
 			/*
@@ -1085,20 +1185,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 			goto out_free_sgtables;
 		}
 
-		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
-
-		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+		if (sg_alloc_table_chained(&prot_sdb->table,
+					   rq->nr_integrity_segments,
 					   prot_sdb->table.sgl,
 					   SCSI_INLINE_PROT_SG_CNT)) {
 			ret = BLK_STS_RESOURCE;
 			goto out_free_sgtables;
 		}
 
-		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
-						prot_sdb->table.sgl);
-		BUG_ON(count > ivecs);
-		BUG_ON(count > queue_max_integrity_segments(rq->q));
-
+		count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
 		cmd->prot_sdb = prot_sdb;
 		cmd->prot_sdb->table.nents = count;
 	}
@@ -1130,6 +1225,15 @@ static void scsi_initialize_rq(struct request *rq)
 	cmd->retries = 0;
 }
 
+/**
+ * scsi_alloc_request - allocate a block request and partially
+ *                      initialize its &scsi_cmnd
+ * @q: the device's request queue
+ * @opf: the request operation code
+ * @flags: block layer allocation flags
+ *
+ * Return: &struct request pointer on success or %NULL on failure
+ */
 struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
 				   blk_mq_req_flags_t flags)
 {
@@ -1148,8 +1252,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
  */
 static void scsi_cleanup_rq(struct request *rq)
 {
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+	cmd->flags = 0;
+
 	if (rq->rq_flags & RQF_DONTPREP) {
-		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+		scsi_mq_uninit_cmd(cmd);
 		rq->rq_flags &= ~RQF_DONTPREP;
 	}
 }
@@ -1564,13 +1672,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
 	if (in_flight)
 		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
 
-	/*
-	 * Only clear the driver-private command data if the LLD does not supply
-	 * a function to initialize that data.
-	 */
-	if (!shost->hostt->init_cmd_priv)
-		memset(cmd + 1, 0, shost->hostt->cmd_size);
-
 	cmd->prot_op = SCSI_PROT_NORMAL;
 	if (blk_rq_bytes(req))
 		cmd->sc_data_direction = rq_dma_dir(req);
@@ -1737,6 +1838,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
 		goto out_dec_target_busy;
 
+	/*
+	 * Only clear the driver-private command data if the LLD does not supply
+	 * a function to initialize that data.
+	 */
+	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+		memset(cmd + 1, 0, shost->hostt->cmd_size);
+
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		ret = scsi_prepare_cmd(req);
 		if (ret != BLK_STS_OK)
@@ -1778,7 +1886,6 @@ out_put_budget:
 	case BLK_STS_OK:
 		break;
 	case BLK_STS_RESOURCE:
-	case BLK_STS_ZONE_RESOURCE:
 		if (scsi_device_blocked(sdev))
 			ret = BLK_STS_DEV_RESOURCE;
 		break;
@@ -1873,42 +1980,40 @@ static void scsi_map_queues(struct blk_mq_tag_set *set)
 	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 }
 
-void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
+void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
 {
 	struct device *dev = shost->dma_dev;
 
-	/*
-	 * this limit is imposed by hardware restrictions
-	 */
-	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
-					SG_MAX_SEGMENTS));
+	memset(lim, 0, sizeof(*lim));
+	lim->max_segments =
+		min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS);
 
 	if (scsi_host_prot_dma(shost)) {
 		shost->sg_prot_tablesize =
 			min_not_zero(shost->sg_prot_tablesize,
 				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
 		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
-		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
+		lim->max_integrity_segments = shost->sg_prot_tablesize;
 	}
 
-	blk_queue_max_hw_sectors(q, shost->max_sectors);
-	blk_queue_segment_boundary(q, shost->dma_boundary);
-	dma_set_seg_boundary(dev, shost->dma_boundary);
-
-	blk_queue_max_segment_size(q, shost->max_segment_size);
-	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
-	dma_set_max_seg_size(dev, queue_max_segment_size(q));
+	lim->max_hw_sectors = shost->max_sectors;
+	lim->seg_boundary_mask = shost->dma_boundary;
+	lim->max_segment_size = shost->max_segment_size;
+	lim->virt_boundary_mask = shost->virt_boundary_mask;
+	lim->dma_alignment = max_t(unsigned int,
+		shost->dma_alignment, dma_get_cache_alignment() - 1);
 
 	/*
-	 * Set a reasonable default alignment: The larger of 32-byte (dword),
-	 * which is a common minimum for HBAs, and the minimum DMA alignment,
-	 * which is set by the platform.
-	 *
-	 * Devices that require a bigger alignment can increase it later.
+	 * Propagate the DMA formation properties to the dma-mapping layer as
+	 * a courtesy service to the LLDDs. This needs to check that the buses
+	 * actually support the DMA API first, though.
 	 */
-	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
+	if (dev->dma_parms) {
+		dma_set_seg_boundary(dev, shost->dma_boundary);
+		dma_set_max_seg_size(dev, shost->max_segment_size);
+	}
 }
-EXPORT_SYMBOL_GPL(__scsi_init_queue);
+EXPORT_SYMBOL_GPL(scsi_init_limits);
 
 static const struct blk_mq_ops scsi_mq_ops_no_commit = {
 	.get_budget	= scsi_mq_get_budget,
@@ -1981,9 +2086,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 	tag_set->queue_depth = shost->can_queue;
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = dev_to_node(shost->dma_dev);
-	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
-	tag_set->flags |=
-		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+	if (shost->hostt->tag_alloc_policy_rr)
+		tag_set->flags |= BLK_MQ_F_TAG_RR;
 	if (shost->queuecommand_may_block)
 		tag_set->flags |= BLK_MQ_F_BLOCKING;
 	tag_set->driver_data = shost;
@@ -2172,11 +2276,25 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
 	unsigned char cmd[12];
 	int use_10_for_ms;
 	int header_length;
-	int result, retry_count = retries;
+	int result;
 	struct scsi_sense_hdr my_sshdr;
+	struct scsi_failure failure_defs[] = {
+		{
+			.sense = UNIT_ATTENTION,
+			.asc = SCMD_FAILURE_ASC_ANY,
+			.ascq = SCMD_FAILURE_ASCQ_ANY,
+			.allowed = retries,
+			.result = SAM_STAT_CHECK_CONDITION,
+		},
+		{}
+	};
+	struct scsi_failures failures = {
+		.failure_definitions = failure_defs,
+	};
 	const struct scsi_exec_args exec_args = {
 		/* caller might not be interested in sense, but we need it */
 		.sshdr = sshdr ? : &my_sshdr,
+		.failures = &failures,
 	};
 
 	memset(data, 0, sizeof(*data));
@@ -2238,12 +2356,6 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
 				goto retry;
 			}
 		}
-		if (scsi_status_is_check_condition(result) &&
-		    sshdr->sense_key == UNIT_ATTENTION &&
-		    retry_count) {
-			retry_count--;
-			goto retry;
-		}
 	}
 	return -EIO;
 }
@@ -2629,6 +2741,7 @@ int
 scsi_device_quiesce(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
+	unsigned int memflags;
 	int err;
 
 	/*
@@ -2643,15 +2756,15 @@ scsi_device_quiesce(struct scsi_device *sdev)
 
 	blk_set_pm_only(q);
 
-	blk_mq_freeze_queue(q);
+	memflags = blk_mq_freeze_queue(q);
 	/*
 	 * Ensure that the effect of blk_set_pm_only() will be visible
 	 * for percpu_ref_tryget() callers that occur after the queue
	 * unfreeze even if the queue was already frozen before this function
 	 * was called. See also https://lwn.net/Articles/573497/.
 	 */
 	synchronize_rcu();
-	blk_mq_unfreeze_queue(q);
+	blk_mq_unfreeze_queue(q, memflags);
 
 	mutex_lock(&sdev->state_mutex);
 	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
@@ -3273,14 +3386,16 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
 }
 EXPORT_SYMBOL(scsi_vpd_lun_id);
 
-/*
+/**
  * scsi_vpd_tpg_id - return a target port group identifier
  * @sdev: SCSI device
+ * @rel_id: pointer to return relative target port in if not %NULL
  *
  * Returns the Target Port Group identifier from the information
- * froom VPD page 0x83 of the device.
+ * from VPD page 0x83 of the device.
+ * Optionally sets @rel_id to the relative target port on success.
  *
- * Returns the identifier or error on failure.
+ * Return: the identifier or error on failure.
  */
 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
 {
@@ -3336,3 +3451,7 @@ void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
 	scmd->result = SAM_STAT_CHECK_CONDITION;
 }
 EXPORT_SYMBOL_GPL(scsi_build_sense);
+
+#ifdef CONFIG_SCSI_LIB_KUNIT_TEST
+#include "scsi_lib_test.c"
+#endif
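
Usage note: the scsi_failures mechanism added above lets a passthrough submitter declare which errors scsi_execute_cmd() should retry, instead of open-coding retry loops the way scsi_mode_sense() used to. A minimal sketch modeled on the scsi_mode_sense() hunk follows; the helper example_tur() and its retry count and timeout are illustrative assumptions, not part of this commit:

	/* Sketch: retry TEST UNIT READY on Unit Attention, any ASC/ASCQ. */
	static int example_tur(struct scsi_device *sdev)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY };
		struct scsi_failure failure_defs[] = {
			{
				.sense = UNIT_ATTENTION,
				.asc = SCMD_FAILURE_ASC_ANY,
				.ascq = SCMD_FAILURE_ASCQ_ANY,
				.allowed = 3,
				.result = SAM_STAT_CHECK_CONDITION,
			},
			{}	/* zero .result terminates the list */
		};
		struct scsi_failures failures = {
			.failure_definitions = failure_defs,
		};
		const struct scsi_exec_args exec_args = {
			.failures = &failures,
		};

		/* No data transfer; 3 midlayer retries, 10 second timeout. */
		return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
					10 * HZ, 3, &exec_args);
	}

As scsi_check_passthrough() shows, a non-zero per-entry .allowed caps retries for that specific match, while entries that leave it zero are counted against failures.total_allowed instead.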