Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c  124
1 file changed, 101 insertions, 23 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 65d0a10c76ad..91c007d26c1e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -40,6 +40,18 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+/*
+ * Size of integrity metadata is usually small, 1 inline sg should
+ * cover normal cases.
+ */
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define SCSI_INLINE_PROT_SG_CNT 0
+#define SCSI_INLINE_SG_CNT 0
+#else
+#define SCSI_INLINE_PROT_SG_CNT 1
+#define SCSI_INLINE_SG_CNT 2
+#endif
+
static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
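For context: SCSI_INLINE_SG_CNT and SCSI_INLINE_PROT_SG_CNT feed the new nents_first_chunk argument that sg_alloc_table_chained()/sg_free_table_chained() take after the accompanying lib/scatterlist change in this series. A minimal sketch of the intended behaviour, assuming that reworked prototype (illustrative only, not part of this patch):

	struct sg_table table;
	/* pre-allocated storage embedded after struct scsi_cmnd */
	struct scatterlist inline_sg[SCSI_INLINE_SG_CNT];

	/*
	 * nents <= SCSI_INLINE_SG_CNT: the table is served entirely from
	 * inline_sg and nothing is allocated; larger requests fall back to
	 * chained SG_CHUNK_SIZE allocations from the sg mempools.
	 */
	if (sg_alloc_table_chained(&table, nents, inline_sg, SCSI_INLINE_SG_CNT))
		return BLK_STS_RESOURCE;
	/* ... map and issue the request ... */
	sg_free_table_chained(&table, SCSI_INLINE_SG_CNT);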
@@ -72,11 +84,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
struct kmem_cache *cache;
int ret = 0;
+ mutex_lock(&scsi_sense_cache_mutex);
cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
if (cache)
- return 0;
+ goto exit;
- mutex_lock(&scsi_sense_cache_mutex);
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
@@ -92,7 +104,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
if (!scsi_sense_cache)
ret = -ENOMEM;
}
-
+ exit:
mutex_unlock(&scsi_sense_cache_mutex);
return ret;
}
@@ -542,9 +554,11 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
- sg_free_table_chained(&cmd->sdb.table, true);
+ sg_free_table_chained(&cmd->sdb.table,
+ SCSI_INLINE_SG_CNT);
if (scsi_prot_sg_count(cmd))
- sg_free_table_chained(&cmd->prot_sdb->table, true);
+ sg_free_table_chained(&cmd->prot_sdb->table,
+ SCSI_INLINE_PROT_SG_CNT);
}
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
@@ -977,7 +991,8 @@ static blk_status_t scsi_init_sgtable(struct request *req,
* If sg table allocation fails, requeue request later.
*/
if (unlikely(sg_alloc_table_chained(&sdb->table,
- blk_rq_nr_phys_segments(req), sdb->table.sgl)))
+ blk_rq_nr_phys_segments(req), sdb->table.sgl,
+ SCSI_INLINE_SG_CNT)))
return BLK_STS_RESOURCE;
/*
@@ -1031,7 +1046,8 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
- prot_sdb->table.sgl)) {
+ prot_sdb->table.sgl,
+ SCSI_INLINE_PROT_SG_CNT)) {
ret = BLK_STS_RESOURCE;
goto out_free_sgtables;
}
@@ -1073,6 +1089,18 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
+/*
+ * Only called when the request isn't completed by SCSI, and not freed by
+ * SCSI
+ */
+static void scsi_cleanup_rq(struct request *rq)
+{
+ if (rq->rq_flags & RQF_DONTPREP) {
+ scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+ rq->rq_flags &= ~RQF_DONTPREP;
+ }
+}
+
/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
{
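The new .cleanup_rq callback is invoked by the block core, not by SCSI itself, when a request that was prepared but never completed by the LLD is about to be reused or freed, e.g. when dm-multipath requeues a cloned request. Roughly, the companion helper added to include/linux/blk-mq.h in the same series looks like this (paraphrased for context):

	static inline void blk_mq_cleanup_rq(struct request *rq)
	{
		if (rq->q->mq_ops->cleanup_rq)
			rq->q->mq_ops->cleanup_rq(rq);
	}

This is what ends up calling scsi_cleanup_rq() above, so driver-private data and the RQF_DONTPREP state are not leaked across the requeue.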
@@ -1436,7 +1464,7 @@ static void scsi_softirq_done(struct request *rq)
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- sdev_printk(KERN_ERR, cmd->device,
+ scmd_printk(KERN_ERR, cmd,
"timing out command, waited %lus\n",
wait_for/HZ);
disposition = SUCCESS;
@@ -1542,9 +1570,9 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
-static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{
- return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+ return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
sizeof(struct scatterlist);
}
@@ -1650,10 +1678,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(req);
}
+ cmd->flags &= SCMD_PRESERVED_FLAGS;
if (sdev->simple_tags)
cmd->flags |= SCMD_TAGGED;
- else
- cmd->flags &= ~SCMD_TAGGED;
+ if (bd->last)
+ cmd->flags |= SCMD_LAST;
scsi_init_cmd_errh(cmd);
cmd->scsi_done = scsi_mq_done;
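SCMD_PRESERVED_FLAGS is introduced alongside SCMD_LAST in include/scsi/scsi_cmnd.h; clearing everything outside that mask on each pass through scsi_queue_rq() replaces the old pattern of toggling individual flags such as SCMD_TAGGED. Paraphrased definition, assumed from the companion header change and shown only for context:

	/* flags that must survive an unprep/reprep cycle */
	#define SCMD_PRESERVED_FLAGS	(SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)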
@@ -1726,7 +1755,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
if (scsi_host_get_prot(shost)) {
sg = (void *)cmd + sizeof(struct scsi_cmnd) +
shost->hostt->cmd_size;
- cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+ cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
}
return 0;
@@ -1768,6 +1797,10 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
+ if (dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dev) >> SECTOR_SHIFT);
+ }
blk_queue_max_hw_sectors(q, shost->max_sectors);
if (shost->unchecked_isa_dma)
blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
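The dma_max_mapping_size() clamp keeps max_sectors within what the DMA API can map in a single call. A rough worked example, assuming the common swiotlb bounce-buffer limit of 256 KiB per mapping (values illustrative, not from this patch):

	/* assumed example values */
	size_t max_map = 256 << 10;		/* 256 KiB from swiotlb */
	shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				   max_map >> SECTOR_SHIFT);	/* = 512 sectors */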
@@ -1775,7 +1808,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
dma_set_seg_boundary(dev, shost->dma_boundary);
blk_queue_max_segment_size(q, shost->max_segment_size);
- dma_set_max_seg_size(dev, shost->max_segment_size);
+ blk_queue_virt_boundary(q, shost->virt_boundary_mask);
+ dma_set_max_seg_size(dev, queue_max_segment_size(q));
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
@@ -1788,10 +1822,38 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
+static const struct blk_mq_ops scsi_mq_ops_no_commit = {
+ .get_budget = scsi_mq_get_budget,
+ .put_budget = scsi_mq_put_budget,
+ .queue_rq = scsi_queue_rq,
+ .complete = scsi_softirq_done,
+ .timeout = scsi_timeout,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .show_rq = scsi_show_rq,
+#endif
+ .init_request = scsi_mq_init_request,
+ .exit_request = scsi_mq_exit_request,
+ .initialize_rq_fn = scsi_initialize_rq,
+ .cleanup_rq = scsi_cleanup_rq,
+ .busy = scsi_mq_lld_busy,
+ .map_queues = scsi_map_queues,
+};
+
+
+static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+
+ shost->hostt->commit_rqs(shost, hctx->queue_num);
+}
+
static const struct blk_mq_ops scsi_mq_ops = {
.get_budget = scsi_mq_get_budget,
.put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
+ .commit_rqs = scsi_commit_rqs,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
@@ -1800,6 +1862,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
.initialize_rq_fn = scsi_initialize_rq,
+ .cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
};
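scsi_commit_rqs() simply forwards to the new commit_rqs() method in the host template, letting a driver ring its doorbell once per batch; the SCMD_LAST flag set in scsi_queue_rq() marks the final command of such a batch. A hypothetical LLD hook might look like the sketch below; the driver structure and register names are made up for illustration:

	static void example_commit_rqs(struct Scsi_Host *shost, u16 hwq)
	{
		struct example_hba *hba = shost_priv(shost);

		/* one doorbell write for everything queued on hardware queue hwq */
		writel(hba->sq_tail[hwq], hba->db_base + hwq * EXAMPLE_DB_STRIDE);
	}

	static struct scsi_host_template example_template = {
		.name		= "example",
		.queuecommand	= example_queuecommand,
		.commit_rqs	= example_commit_rqs,
	};

Hosts that do not provide commit_rqs keep using scsi_mq_ops_no_commit, so the indirect call is avoided entirely on the fast path.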
@@ -1820,13 +1883,18 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
- sgl_size = scsi_mq_sgl_size(shost);
+ sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
+ scsi_mq_inline_sgl_size(shost));
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
- cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
+ cmd_size += sizeof(struct scsi_data_buffer) +
+ sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
memset(&shost->tag_set, 0, sizeof(shost->tag_set));
- shost->tag_set.ops = &scsi_mq_ops;
+ if (shost->hostt->commit_rqs)
+ shost->tag_set.ops = &scsi_mq_ops;
+ else
+ shost->tag_set.ops = &scsi_mq_ops_no_commit;
shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
shost->tag_set.queue_depth = shost->can_queue;
shost->tag_set.cmd_size = cmd_size;
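With these sizes, the per-request payload that blk-mq allocates for each command is laid out as follows (matching scsi_mq_init_request() above; the diagram is explanatory, not part of the patch). The max_t() against sizeof(struct scatterlist) keeps the inline area non-empty even when SCSI_INLINE_SG_CNT is 0 under CONFIG_ARCH_NO_SG_CHAIN.

	/*
	 * struct scsi_cmnd
	 * LLD private data              (shost->hostt->cmd_size bytes)
	 * inline data scatterlist       (sgl_size, up to SCSI_INLINE_SG_CNT entries)
	 * struct scsi_data_buffer       (prot_sdb, only if protection is enabled)
	 * inline integrity scatterlist  (SCSI_INLINE_PROT_SG_CNT entries)
	 */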
@@ -1855,7 +1923,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
struct scsi_device *sdev = NULL;
- if (q->mq_ops == &scsi_mq_ops)
+ if (q->mq_ops == &scsi_mq_ops_no_commit ||
+ q->mq_ops == &scsi_mq_ops)
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
@@ -2616,10 +2685,6 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
* a legal transition). When the device is in this state, command processing
* is paused until the device leaves the SDEV_BLOCK state. See also
* scsi_internal_device_unblock().
- *
- * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
- * scsi_internal_device_block() has blocked a SCSI device and also
- * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
static int scsi_internal_device_block(struct scsi_device *sdev)
{
@@ -2660,6 +2725,14 @@ void scsi_start_queue(struct scsi_device *sdev)
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
enum scsi_device_state new_state)
{
+ switch (new_state) {
+ case SDEV_RUNNING:
+ case SDEV_TRANSPORT_OFFLINE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/*
* Try to transition the scsi device to SDEV_RUNNING or one of the
* offlined states and goose the device queue if successful.
@@ -2717,7 +2790,12 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev,
static void
device_block(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_block(sdev);
+ int ret;
+
+ ret = scsi_internal_device_block(sdev);
+
+ WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
+ dev_name(&sdev->sdev_gendev), ret);
}
static int