Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
-rw-r--r--  drivers/scsi/virtio_scsi.c | 490
1 file changed, 229 insertions(+), 261 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 9be211d68b15..96a69edddbe5 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Virtio SCSI HBA driver
*
@@ -7,10 +8,6 @@
* Authors:
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
* Paolo Bonzini <pbonzini@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -25,18 +22,25 @@
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>
+
+#include "sd.h"
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
+static unsigned int virtscsi_poll_queues;
+module_param(virtscsi_poll_queues, uint, 0644);
+MODULE_PARM_DESC(virtscsi_poll_queues,
+ "The number of dedicated virtqueues for polling I/O");
+
/* Command queue element */
struct virtio_scsi_cmd {
struct scsi_cmnd *sc;
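A minimal userspace sketch of how the poll queues introduced above would
typically be exercised, assuming liburing and a virtio-scsi disk at the
hypothetical path /dev/sda; the module would be loaded with something like
"modprobe virtio_scsi virtscsi_poll_queues=2":

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	fd = open("/dev/sda", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	/* IOPOLL completions are reaped by polling, not interrupts */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* Busy-polls the device's poll virtqueue via ->mq_poll() */
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}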
@@ -68,36 +72,6 @@ struct virtio_scsi_vq {
struct virtqueue *vq;
};
-/*
- * Per-target queue state.
- *
- * This struct holds the data needed by the queue steering policy. When a
- * target is sent multiple requests, we need to drive them to the same queue so
- * that FIFO processing order is kept. However, if a target was idle, we can
- * choose a queue arbitrarily. In this case the queue is chosen according to
- * the current VCPU, so the driver expects the number of request queues to be
- * equal to the number of VCPUs. This makes it easy and fast to select the
- * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
- * (each virtqueue's affinity is set to the CPU that "owns" the queue).
- *
- * tgt_seq is held to serialize reading and writing req_vq.
- *
- * Decrements of reqs are never concurrent with writes of req_vq: before the
- * decrement reqs will be != 0; after the decrement the virtqueue completion
- * routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_seq, provided of course we make reqs
- * an atomic_t.
- */
-struct virtio_scsi_target_state {
- seqcount_t tgt_seq;
-
- /* Count of outstanding requests. */
- atomic_t reqs;
-
- /* Currently active virtqueue for requests sent to this target. */
- struct virtio_scsi_vq *req_vq;
-};
-
/* Driver instance state */
struct virtio_scsi {
struct virtio_device *vdev;
@@ -106,9 +80,7 @@ struct virtio_scsi {
struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
u32 num_queues;
-
- /* If the affinity hint is set for virtqueues */
- bool affinity_hint_set;
+ int io_queues[HCTX_MAX_TYPES];
struct hlist_node node;
@@ -130,19 +102,11 @@ static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
- if (!resid)
- return;
-
- if (!scsi_bidi_cmnd(sc)) {
- scsi_set_resid(sc, resid);
- return;
- }
-
- scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
- scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
+ if (resid)
+ scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}
-/**
+/*
* virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
*
* Called with vq_lock held.
@@ -152,8 +116,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
struct virtio_scsi_cmd *cmd = buf;
struct scsi_cmnd *sc = cmd->sc;
struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
dev_dbg(&sc->device->sdev_gendev,
"cmd %p response %u status %#02x sense_len %u\n",
@@ -184,15 +146,15 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
break;
case VIRTIO_SCSI_S_TARGET_FAILURE:
- set_host_byte(sc, DID_TARGET_FAILURE);
+ set_host_byte(sc, DID_BAD_TARGET);
break;
case VIRTIO_SCSI_S_NEXUS_FAILURE:
- set_host_byte(sc, DID_NEXUS_FAILURE);
+ set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
break;
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
resp->response);
- /* fall through */
+ fallthrough;
case VIRTIO_SCSI_S_FAILURE:
set_host_byte(sc, DID_ERROR);
break;
@@ -200,18 +162,14 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
VIRTIO_SCSI_SENSE_SIZE);
- if (sc->sense_buffer) {
+ if (resp->sense_len) {
memcpy(sc->sense_buffer, resp->sense,
min_t(u32,
virtio32_to_cpu(vscsi->vdev, resp->sense_len),
VIRTIO_SCSI_SENSE_SIZE));
- if (resp->sense_len)
- set_driver_byte(sc, DRIVER_SENSE);
}
- sc->scsi_done(sc);
-
- atomic_dec(&tgt->reqs);
+ scsi_done(sc);
}
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -229,8 +187,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
fn(vscsi, buf);
- if (unlikely(virtqueue_is_broken(vq)))
- break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
@@ -330,7 +286,12 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
- scsi_add_device(shost, 0, target, lun);
+ if (lun == 0) {
+ scsi_scan_target(&shost->shost_gendev, 0, target,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ } else {
+ scsi_add_device(shost, 0, target, lun);
+ }
break;
case VIRTIO_SCSI_EVT_RESET_REMOVED:
sdev = scsi_device_lookup(shost, 0, target, lun);
@@ -343,7 +304,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
}
break;
default:
- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
}
}
@@ -367,11 +328,53 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
/* Handle "Parameters changed", "Mode parameters changed", and
"Capacity data has changed". */
if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
+static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+ int result, inquiry_len, inq_result_len = 256;
+ char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
+
+ if (!inq_result)
+ return -ENOMEM;
+
+ shost_for_each_device(sdev, shost) {
+ inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = INQUIRY;
+ scsi_cmd[4] = (unsigned char) inquiry_len;
+
+ memset(inq_result, 0, inq_result_len);
+
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ inq_result, inquiry_len,
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+ if (result == 0 && inq_result[0] >> 5) {
+ /* PQ indicates the LUN is not attached */
+ scsi_remove_device(sdev);
+ } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
+ /*
+ * If all LUNs of a virtio-scsi device are unplugged
+ * it will respond with BAD TARGET on any INQUIRY
+ * command.
+ * Remove the device in this case as well.
+ */
+ scsi_remove_device(sdev);
+ }
+ }
+
+ kfree(inq_result);
+ return 0;
+}
+
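/*
 * For reference, the "inq_result[0] >> 5" test above reads the SPC
 * peripheral qualifier from INQUIRY byte 0 (bits 7-5): PQ 000b means a
 * device is attached, while any nonzero PQ (001b not connected, 011b not
 * capable of supporting a device at this LUN) means the LUN is gone and
 * can be removed.
 */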
static void virtscsi_handle_event(struct work_struct *work)
{
struct virtio_scsi_event_node *event_node =
@@ -381,8 +384,13 @@ static void virtscsi_handle_event(struct work_struct *work)
if (event->event &
cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
+ int ret;
+
event->event &= ~cpu_to_virtio32(vscsi->vdev,
VIRTIO_SCSI_T_EVENTS_MISSED);
+ ret = virtscsi_rescan_hotunplug(vscsi);
+ if (ret)
+ return;
scsi_scan_host(virtio_scsi_host(vscsi->vdev));
}
@@ -396,7 +404,7 @@ static void virtscsi_handle_event(struct work_struct *work)
virtscsi_handle_param_change(vscsi, event);
break;
default:
- pr_err("Unsupport virtio scsi event %x\n", event->event);
+ pr_err("Unsupported virtio scsi event %x\n", event->event);
}
virtscsi_kick_event(vscsi, event_node);
}
@@ -417,14 +425,7 @@ static void virtscsi_event_done(struct virtqueue *vq)
virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
};
-/**
- * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
- * @vq : the struct virtqueue we're talking about
- * @cmd : command structure
- * @req_size : size of the request buffer
- * @resp_size : size of the response buffer
- */
-static int virtscsi_add_cmd(struct virtqueue *vq,
+static int __virtscsi_add_cmd(struct virtqueue *vq,
struct virtio_scsi_cmd *cmd,
size_t req_size, size_t resp_size)
{
@@ -437,9 +438,9 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
if (sc && sc->sc_data_direction != DMA_NONE) {
if (sc->sc_data_direction != DMA_FROM_DEVICE)
- out = &scsi_out(sc)->table;
+ out = &sc->sdb.table;
if (sc->sc_data_direction != DMA_TO_DEVICE)
- in = &scsi_in(sc)->table;
+ in = &sc->sdb.table;
}
/* Request header. */
@@ -469,17 +470,39 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
-static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
+static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
+{
+ bool needs_kick;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vq->vq_lock, flags);
+ needs_kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irqrestore(&vq->vq_lock, flags);
+
+ if (needs_kick)
+ virtqueue_notify(vq->vq);
+}
+
+/**
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
+ * @vq : the struct virtqueue we're talking about
+ * @cmd : command structure
+ * @req_size : size of the request buffer
+ * @resp_size : size of the response buffer
+ * @kick : whether to kick the virtqueue immediately
+ */
+static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size)
+ size_t req_size, size_t resp_size,
+ bool kick)
{
unsigned long flags;
int err;
bool needs_kick = false;
spin_lock_irqsave(&vq->vq_lock, flags);
- err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
- if (!err)
+ err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
+ if (!err && kick)
needs_kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->vq_lock, flags);
@@ -508,7 +531,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
{
- struct request *rq = sc->request;
+ struct request *rq = scsi_cmd_to_rq(sc);
struct blk_integrity *bi;
virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
@@ -516,25 +539,35 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
if (!rq || !scsi_prot_sg_count(sc))
return;
- bi = blk_get_integrity(rq->rq_disk);
+ bi = blk_get_integrity(rq->q->disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
- blk_rq_sectors(rq) *
- bi->tuple_size);
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
else if (sc->sc_data_direction == DMA_FROM_DEVICE)
cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
- blk_rq_sectors(rq) *
- bi->tuple_size);
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
}
#endif
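/*
 * For reference, bio_integrity_bytes() (linux/blk-integrity.h) scales by
 * the protection interval instead of assuming 512-byte intervals, roughly:
 *
 *	(sectors >> (bi->interval_exp - 9)) * bi->tuple_size
 *
 * which is why it replaces the open-coded blk_rq_sectors() * tuple_size.
 */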
-static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
- struct virtio_scsi_vq *req_vq,
+static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
+ struct scsi_cmnd *sc)
+{
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+
+ return &vscsi->req_vqs[hwq];
+}
+
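/*
 * For reference, blk_mq_unique_tag() (linux/blk-mq.h) packs the hardware
 * queue index into the upper 16 bits of the tag, so the decode above is
 * roughly: hwq = unique_tag >> BLK_MQ_UNIQUE_TAG_BITS. Each hardware
 * queue maps 1:1 onto a request virtqueue.
 */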
+static int virtscsi_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *sc)
{
- struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ bool kick;
unsigned long flags;
int req_size;
int ret;
@@ -564,7 +597,8 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
req_size = sizeof(cmd->req.cmd);
}
- ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ kick = (sc->flags & SCMD_LAST) != 0;
+ ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
if (ret == -EIO) {
cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
spin_lock_irqsave(&req_vq->vq_lock, flags);
@@ -576,87 +610,14 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
return 0;
}
-static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
- struct scsi_cmnd *sc)
-{
- struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
-
- atomic_inc(&tgt->reqs);
- return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
-}
-
-static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
- struct scsi_cmnd *sc)
-{
- u32 tag = blk_mq_unique_tag(sc->request);
- u16 hwq = blk_mq_unique_tag_to_hwq(tag);
-
- return &vscsi->req_vqs[hwq];
-}
-
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
- struct virtio_scsi_target_state *tgt)
-{
- struct virtio_scsi_vq *vq;
- unsigned long flags;
- u32 queue_num;
-
- local_irq_save(flags);
- if (atomic_inc_return(&tgt->reqs) > 1) {
- unsigned long seq;
-
- do {
- seq = read_seqcount_begin(&tgt->tgt_seq);
- vq = tgt->req_vq;
- } while (read_seqcount_retry(&tgt->tgt_seq, seq));
- } else {
- /* no writes can be concurrent because of atomic_t */
- write_seqcount_begin(&tgt->tgt_seq);
-
- /* keep previous req_vq if a reader just arrived */
- if (unlikely(atomic_read(&tgt->reqs) > 1)) {
- vq = tgt->req_vq;
- goto unlock;
- }
-
- queue_num = smp_processor_id();
- while (unlikely(queue_num >= vscsi->num_queues))
- queue_num -= vscsi->num_queues;
- tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
- write_seqcount_end(&tgt->tgt_seq);
- }
- local_irq_restore(flags);
-
- return vq;
-}
-
-static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
- struct scsi_cmnd *sc)
-{
- struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
- struct virtio_scsi_vq *req_vq;
-
- if (shost_use_blk_mq(sh))
- req_vq = virtscsi_pick_vq_mq(vscsi, sc);
- else
- req_vq = virtscsi_pick_vq(vscsi, tgt);
-
- return virtscsi_queuecommand(vscsi, req_vq, sc);
-}
-
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
DECLARE_COMPLETION_ONSTACK(comp);
int ret = FAILED;
cmd->comp = &comp;
- if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
- sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
+ if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
goto out;
wait_for_completion(&comp);
@@ -670,9 +631,8 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
* we're using independent interrupts (e.g. MSI). Poll the
* virtqueues once.
*
- * In the abort case, sc->scsi_done will do nothing, because
- * the block layer must have detected a timeout and as a result
- * REQ_ATOM_COMPLETE has been set.
+ * In the abort case, scsi_done() will do nothing, because the
+ * command timed out and hence SCMD_STATE_COMPLETE has been set.
*/
virtscsi_poll_requests(vscsi);
@@ -692,7 +652,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = cpu_to_virtio32(vscsi->vdev,
@@ -751,7 +710,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
@@ -764,35 +722,59 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
-static int virtscsi_target_alloc(struct scsi_target *starget)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
{
- struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
- struct virtio_scsi *vscsi = shost_priv(sh);
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ int i, qoff;
+
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+ map->nr_queues = vscsi->io_queues[i];
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+
+ if (map->nr_queues == 0)
+ continue;
+
+ /*
+ * Regular queues have interrupts and hence CPU affinity is
+ * defined by the core virtio code, but polling queues have
+ * no interrupts so we let the block layer assign CPU affinity.
+ */
+ if (i == HCTX_TYPE_POLL)
+ blk_mq_map_queues(map);
+ else
+ blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
+ }
+}
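/*
 * Worked example: with four request vqs and virtscsi_poll_queues=2,
 * HCTX_TYPE_DEFAULT covers hw queues 0-1 (queue_offset 0),
 * HCTX_TYPE_READ stays empty, and HCTX_TYPE_POLL covers hw queues 2-3
 * (queue_offset 2).
 */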
- struct virtio_scsi_target_state *tgt =
- kmalloc(sizeof(*tgt), GFP_KERNEL);
- if (!tgt)
- return -ENOMEM;
+static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
+ unsigned long flags;
+ unsigned int len;
+ int found = 0;
+ void *buf;
- seqcount_init(&tgt->tgt_seq);
- atomic_set(&tgt->reqs, 0);
- tgt->req_vq = &vscsi->req_vqs[0];
+ spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
- starget->hostdata = tgt;
- return 0;
-}
+ while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
+ virtscsi_complete_cmd(vscsi, buf);
+ found++;
+ }
-static void virtscsi_target_destroy(struct scsi_target *starget)
-{
- struct virtio_scsi_target_state *tgt = starget->hostdata;
- kfree(tgt);
+ spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
+
+ return found;
}
-static int virtscsi_map_queues(struct Scsi_Host *shost)
+static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
struct virtio_scsi *vscsi = shost_priv(shost);
- return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+ virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
}
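/*
 * Batching contract: virtscsi_queuecommand() only kicks the virtqueue for
 * requests marked SCMD_LAST; when blk-mq ends a batched submission without
 * such a request it calls ->commit_rqs() instead, so the host is notified
 * once per batch rather than once per command.
 */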
/*
@@ -800,64 +782,41 @@ static int virtscsi_map_queues(struct Scsi_Host *shost)
* latencies might be higher than on bare metal. Reset the timer
* unconditionally to give the host a chance to perform EH.
*/
-static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
+static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
- return BLK_EH_RESET_TIMER;
+ return SCSI_EH_RESET_TIMER;
}
-static struct scsi_host_template virtscsi_host_template_single = {
- .module = THIS_MODULE,
- .name = "Virtio SCSI HBA",
- .proc_name = "virtio_scsi",
- .this_id = -1,
- .cmd_size = sizeof(struct virtio_scsi_cmd),
- .queuecommand = virtscsi_queuecommand_single,
- .change_queue_depth = virtscsi_change_queue_depth,
- .eh_abort_handler = virtscsi_abort,
- .eh_device_reset_handler = virtscsi_device_reset,
- .eh_timed_out = virtscsi_eh_timed_out,
- .slave_alloc = virtscsi_device_alloc,
-
- .can_queue = 1024,
- .dma_boundary = UINT_MAX,
- .use_clustering = ENABLE_CLUSTERING,
- .target_alloc = virtscsi_target_alloc,
- .target_destroy = virtscsi_target_destroy,
- .track_queue_depth = 1,
-};
-
-static struct scsi_host_template virtscsi_host_template_multi = {
+static const struct scsi_host_template virtscsi_host_template = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
.this_id = -1,
.cmd_size = sizeof(struct virtio_scsi_cmd),
- .queuecommand = virtscsi_queuecommand_multi,
+ .queuecommand = virtscsi_queuecommand,
+ .mq_poll = virtscsi_mq_poll,
+ .commit_rqs = virtscsi_commit_rqs,
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
.eh_timed_out = virtscsi_eh_timed_out,
- .slave_alloc = virtscsi_device_alloc,
+ .sdev_init = virtscsi_device_alloc,
- .can_queue = 1024,
.dma_boundary = UINT_MAX,
- .use_clustering = ENABLE_CLUSTERING,
- .target_alloc = virtscsi_target_alloc,
- .target_destroy = virtscsi_target_destroy,
.map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
};
#define virtscsi_config_get(vdev, fld) \
({ \
- typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val; \
virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
__val; \
})
#define virtscsi_config_set(vdev, fld, val) \
do { \
- typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
} while(0)
@@ -871,7 +830,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
@@ -880,33 +839,46 @@ static int virtscsi_init(struct virtio_device *vdev,
{
int err;
u32 i;
- u32 num_vqs;
- vq_callback_t **callbacks;
- const char **names;
+ u32 num_vqs, num_poll_vqs, num_req_vqs;
+ struct virtqueue_info *vqs_info;
struct virtqueue **vqs;
struct irq_affinity desc = { .pre_vectors = 2 };
- num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
- vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
- callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
- names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
+ num_req_vqs = vscsi->num_queues;
+ num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
+ vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
+ vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);
- if (!callbacks || !vqs || !names) {
+ if (!vqs || !vqs_info) {
err = -ENOMEM;
goto out;
}
- callbacks[0] = virtscsi_ctrl_done;
- callbacks[1] = virtscsi_event_done;
- names[0] = "control";
- names[1] = "event";
- for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
- callbacks[i] = virtscsi_req_done;
- names[i] = "request";
+ num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
+ num_req_vqs - 1);
+ vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
+ vscsi->io_queues[HCTX_TYPE_READ] = 0;
+ vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+ dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+ vscsi->io_queues[HCTX_TYPE_DEFAULT],
+ vscsi->io_queues[HCTX_TYPE_READ],
+ vscsi->io_queues[HCTX_TYPE_POLL]);
+
+ vqs_info[0].callback = virtscsi_ctrl_done;
+ vqs_info[0].name = "control";
+ vqs_info[1].callback = virtscsi_event_done;
+ vqs_info[1].name = "event";
+ for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
+ vqs_info[i].callback = virtscsi_req_done;
+ vqs_info[i].name = "request";
}
+ for (; i < num_vqs; i++)
+ vqs_info[i].name = "request_poll";
+
/* Discover virtqueues and write information to configuration. */
- err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
+ err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
if (err)
goto out;
@@ -922,8 +894,7 @@ static int virtscsi_init(struct virtio_device *vdev,
err = 0;
out:
- kfree(names);
- kfree(callbacks);
+ kfree(vqs_info);
kfree(vqs);
if (err)
virtscsi_remove_vqs(vdev);
@@ -938,7 +909,6 @@ static int virtscsi_probe(struct virtio_device *vdev)
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
- struct scsi_host_template *hostt;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -948,21 +918,19 @@ static int virtscsi_probe(struct virtio_device *vdev)
/* We need to know how many queues before we allocate. */
num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+ num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
+ num_queues = blk_mq_num_possible_queues(num_queues);
num_targets = virtscsi_config_get(vdev, max_target) + 1;
- if (num_queues == 1)
- hostt = &virtscsi_host_template_single;
- else
- hostt = &virtscsi_host_template_multi;
-
- shost = scsi_host_alloc(hostt,
- sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
+ shost = scsi_host_alloc(&virtscsi_host_template,
+ struct_size(vscsi, req_vqs, num_queues));
if (!shost)
return -ENOMEM;
sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
shost->sg_tablesize = sg_elems;
+ shost->nr_maps = 1;
vscsi = shost_priv(shost);
vscsi->vdev = vdev;
vscsi->num_queues = num_queues;
@@ -972,6 +940,11 @@ static int virtscsi_probe(struct virtio_device *vdev)
if (err)
goto virtscsi_init_failed;
+ if (vscsi->io_queues[HCTX_TYPE_POLL])
+ shost->nr_maps = HCTX_TYPE_POLL + 1;
+
+ shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
+
cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
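/*
 * Sizing note: capping can_queue at the vring size of the first request
 * queue keeps the number of in-flight commands within what a virtqueue
 * can hold, so virtqueue_add_sgs() should not run out of descriptors in
 * the normal I/O path.
 */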
@@ -1073,7 +1046,6 @@ static struct virtio_driver virtio_scsi_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
@@ -1083,7 +1055,7 @@ static struct virtio_driver virtio_scsi_driver = {
.remove = virtscsi_remove,
};
-static int __init init(void)
+static int __init virtio_scsi_init(void)
{
int ret = -ENOMEM;
@@ -1108,25 +1080,21 @@ static int __init init(void)
return 0;
error:
- if (virtscsi_cmd_pool) {
- mempool_destroy(virtscsi_cmd_pool);
- virtscsi_cmd_pool = NULL;
- }
- if (virtscsi_cmd_cache) {
- kmem_cache_destroy(virtscsi_cmd_cache);
- virtscsi_cmd_cache = NULL;
- }
+ mempool_destroy(virtscsi_cmd_pool);
+ virtscsi_cmd_pool = NULL;
+ kmem_cache_destroy(virtscsi_cmd_cache);
+ virtscsi_cmd_cache = NULL;
return ret;
}
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");