Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
 drivers/scsi/virtio_scsi.c | 848 +++++++++++++++++++++++++----------------------
 1 file changed, 452 insertions(+), 396 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2168258fb2c3..96a69edddbe5 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Virtio SCSI HBA driver
*
@@ -7,10 +8,6 @@
* Authors:
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
* Paolo Bonzini <pbonzini@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -18,25 +15,39 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
+#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
+#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_devinfo.h>
+#include <linux/seqlock.h>
+
+#include "sd.h"
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
+static unsigned int virtscsi_poll_queues;
+module_param(virtscsi_poll_queues, uint, 0644);
+MODULE_PARM_DESC(virtscsi_poll_queues,
+ "The number of dedicated virtqueues for polling I/O");
+
/* Command queue element */
struct virtio_scsi_cmd {
struct scsi_cmnd *sc;
struct completion *comp;
union {
struct virtio_scsi_cmd_req cmd;
+ struct virtio_scsi_cmd_req_pi cmd_pi;
struct virtio_scsi_ctrl_tmf_req tmf;
struct virtio_scsi_ctrl_an_req an;
} req;
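
Note (not part of the diff): the new virtscsi_poll_queues module parameter
carves dedicated poll virtqueues out of the device's request queues at probe
time; the driver always keeps at least one interrupt-driven request queue
(see the min_t() clamp in virtscsi_init() later in this diff). For example,
"modprobe virtio_scsi virtscsi_poll_queues=2" would reserve two virtqueues
for polled I/O, assuming the device exposes at least three request queues.
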
@@ -61,43 +72,6 @@ struct virtio_scsi_vq {
struct virtqueue *vq;
};
-/*
- * Per-target queue state.
- *
- * This struct holds the data needed by the queue steering policy. When a
- * target is sent multiple requests, we need to drive them to the same queue so
- * that FIFO processing order is kept. However, if a target was idle, we can
- * choose a queue arbitrarily. In this case the queue is chosen according to
- * the current VCPU, so the driver expects the number of request queues to be
- * equal to the number of VCPUs. This makes it easy and fast to select the
- * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
- * (each virtqueue's affinity is set to the CPU that "owns" the queue).
- *
- * An interesting effect of this policy is that only writes to req_vq need to
- * take the tgt_lock. Read can be done outside the lock because:
- *
- * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
- * In that case, no other CPU is reading req_vq: even if they were in
- * virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
- *
- * - reads of req_vq only occur when the target is not idle (reqs != 0).
- * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
- *
- * Similarly, decrements of reqs are never concurrent with writes of req_vq.
- * Thus they can happen outside the tgt_lock, provided of course we make reqs
- * an atomic_t.
- */
-struct virtio_scsi_target_state {
- /* This spinlock never held at the same time as vq_lock. */
- spinlock_t tgt_lock;
-
- /* Count of outstanding requests. */
- atomic_t reqs;
-
- /* Currently active virtqueue for requests sent to this target. */
- struct virtio_scsi_vq *req_vq;
-};
-
/* Driver instance state */
struct virtio_scsi {
struct virtio_device *vdev;
@@ -106,12 +80,12 @@ struct virtio_scsi {
struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
u32 num_queues;
+ int io_queues[HCTX_MAX_TYPES];
- /* If the affinity hint is set for virtqueues */
- bool affinity_hint_set;
+ struct hlist_node node;
- /* CPU hotplug notifier */
- struct notifier_block nb;
+ /* Protected by event_vq lock */
+ bool stop_events;
struct virtio_scsi_vq ctrl_vq;
struct virtio_scsi_vq event_vq;
@@ -128,19 +102,11 @@ static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
- if (!resid)
- return;
-
- if (!scsi_bidi_cmnd(sc)) {
- scsi_set_resid(sc, resid);
- return;
- }
-
- scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
- scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
+ if (resid)
+ scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}
-/**
+/*
* virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
*
* Called with vq_lock held.
@@ -150,15 +116,13 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
struct virtio_scsi_cmd *cmd = buf;
struct scsi_cmnd *sc = cmd->sc;
struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
dev_dbg(&sc->device->sdev_gendev,
"cmd %p response %u status %#02x sense_len %u\n",
sc, resp->response, resp->status, resp->sense_len);
sc->result = resp->status;
- virtscsi_compute_resid(sc, resp->resid);
+ virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
switch (resp->response) {
case VIRTIO_SCSI_S_OK:
set_host_byte(sc, DID_OK);
@@ -182,32 +146,30 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
break;
case VIRTIO_SCSI_S_TARGET_FAILURE:
- set_host_byte(sc, DID_TARGET_FAILURE);
+ set_host_byte(sc, DID_BAD_TARGET);
break;
case VIRTIO_SCSI_S_NEXUS_FAILURE:
- set_host_byte(sc, DID_NEXUS_FAILURE);
+ set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
break;
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
resp->response);
- /* fall through */
+ fallthrough;
case VIRTIO_SCSI_S_FAILURE:
set_host_byte(sc, DID_ERROR);
break;
}
- WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
- if (sc->sense_buffer) {
+ WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
+ VIRTIO_SCSI_SENSE_SIZE);
+ if (resp->sense_len) {
memcpy(sc->sense_buffer, resp->sense,
- min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
- if (resp->sense_len)
- set_driver_byte(sc, DRIVER_SENSE);
+ min_t(u32,
+ virtio32_to_cpu(vscsi->vdev, resp->sense_len),
+ VIRTIO_SCSI_SENSE_SIZE));
}
- mempool_free(cmd, virtscsi_cmd_pool);
- sc->scsi_done(sc);
-
- atomic_dec(&tgt->reqs);
+ scsi_done(sc);
}
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -224,6 +186,7 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
virtqueue_disable_cb(vq);
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
fn(vscsi, buf);
+
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
@@ -235,49 +198,25 @@ static void virtscsi_req_done(struct virtqueue *vq)
int index = vq->index - VIRTIO_SCSI_VQ_BASE;
struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
- /*
- * Read req_vq before decrementing the reqs field in
- * virtscsi_complete_cmd.
- *
- * With barriers:
- *
- * CPU #0 virtscsi_queuecommand_multi (CPU #1)
- * ------------------------------------------------------------
- * lock vq_lock
- * read req_vq
- * read reqs (reqs = 1)
- * write reqs (reqs = 0)
- * increment reqs (reqs = 1)
- * write req_vq
- *
- * Possible reordering without barriers:
- *
- * CPU #0 virtscsi_queuecommand_multi (CPU #1)
- * ------------------------------------------------------------
- * lock vq_lock
- * read reqs (reqs = 1)
- * write reqs (reqs = 0)
- * increment reqs (reqs = 1)
- * write req_vq
- * read (wrong) req_vq
- *
- * We do not need a full smp_rmb, because req_vq is required to get
- * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
- * in the virtqueue as the user token.
- */
- smp_read_barrier_depends();
-
virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
};
+static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
+{
+ int i, num_vqs;
+
+ num_vqs = vscsi->num_queues;
+ for (i = 0; i < num_vqs; i++)
+ virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
+ virtscsi_complete_cmd);
+}
+
static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_cmd *cmd = buf;
if (cmd->comp)
- complete_all(cmd->comp);
- else
- mempool_free(cmd, virtscsi_cmd_pool);
+ complete(cmd->comp);
}
static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -288,6 +227,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
};
+static void virtscsi_handle_event(struct work_struct *work);
+
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct virtio_scsi_event_node *event_node)
{
@@ -295,6 +236,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct scatterlist sg;
unsigned long flags;
+ INIT_WORK(&event_node->work, virtscsi_handle_event);
sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -325,6 +267,11 @@ static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
int i;
+ /* Stop scheduling work before calling cancel_work_sync. */
+ spin_lock_irq(&vscsi->event_vq.vq_lock);
+ vscsi->stop_events = true;
+ spin_unlock_irq(&vscsi->event_vq.vq_lock);
+
for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
cancel_work_sync(&vscsi->event_list[i].work);
}
@@ -337,9 +284,14 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
unsigned int target = event->lun[1];
unsigned int lun = (event->lun[2] << 8) | event->lun[3];
- switch (event->reason) {
+ switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
- scsi_add_device(shost, 0, target, lun);
+ if (lun == 0) {
+ scsi_scan_target(&shost->shost_gendev, 0, target,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ } else {
+ scsi_add_device(shost, 0, target, lun);
+ }
break;
case VIRTIO_SCSI_EVT_RESET_REMOVED:
sdev = scsi_device_lookup(shost, 0, target, lun);
@@ -352,7 +304,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
}
break;
default:
- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
}
}
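
Note (not part of the diff): the target/LUN decoding in the event handlers
above follows the fixed 8-byte LUN format that virtio-scsi uses for
single-level, flat addressing. A sketch of that layout:

    /*
     * lun[0] = 1                   bus identifier, always 1
     * lun[1] = target              target number
     * lun[2] = (lun >> 8) | 0x40   high LUN bits, flat-space flag
     * lun[3] = lun & 0xff          low LUN bits
     * lun[4..7] = 0                unused
     */
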
@@ -363,8 +315,8 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
unsigned int target = event->lun[1];
unsigned int lun = (event->lun[2] << 8) | event->lun[3];
- u8 asc = event->reason & 255;
- u8 ascq = event->reason >> 8;
+ u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
+ u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
sdev = scsi_device_lookup(shost, 0, target, lun);
if (!sdev) {
@@ -376,11 +328,53 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
/* Handle "Parameters changed", "Mode parameters changed", and
"Capacity data has changed". */
if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
+static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+ int result, inquiry_len, inq_result_len = 256;
+ char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
+
+ if (!inq_result)
+ return -ENOMEM;
+
+ shost_for_each_device(sdev, shost) {
+ inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = INQUIRY;
+ scsi_cmd[4] = (unsigned char) inquiry_len;
+
+ memset(inq_result, 0, inq_result_len);
+
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ inq_result, inquiry_len,
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+ if (result == 0 && inq_result[0] >> 5) {
+ /* PQ indicates the LUN is not attached */
+ scsi_remove_device(sdev);
+ } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
+ /*
+ * If all LUNs of a virtio-scsi device are unplugged
+ * it will respond with BAD TARGET on any INQUIRY
+ * command.
+ * Remove the device in this case as well.
+ */
+ scsi_remove_device(sdev);
+ }
+ }
+
+ kfree(inq_result);
+ return 0;
+}
+
static void virtscsi_handle_event(struct work_struct *work)
{
struct virtio_scsi_event_node *event_node =
@@ -388,12 +382,19 @@ static void virtscsi_handle_event(struct work_struct *work)
struct virtio_scsi *vscsi = event_node->vscsi;
struct virtio_scsi_event *event = &event_node->event;
- if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
- event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
+ if (event->event &
+ cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
+ int ret;
+
+ event->event &= ~cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_EVENTS_MISSED);
+ ret = virtscsi_rescan_hotunplug(vscsi);
+ if (ret)
+ return;
scsi_scan_host(virtio_scsi_host(vscsi->vdev));
}
- switch (event->event) {
+ switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
case VIRTIO_SCSI_T_NO_EVENT:
break;
case VIRTIO_SCSI_T_TRANSPORT_RESET:
@@ -403,7 +404,7 @@ static void virtscsi_handle_event(struct work_struct *work)
virtscsi_handle_param_change(vscsi, event);
break;
default:
- pr_err("Unsupport virtio scsi event %x\n", event->event);
+ pr_err("Unsupported virtio scsi event %x\n", event->event);
}
virtscsi_kick_event(vscsi, event_node);
}
@@ -412,8 +413,8 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_event_node *event_node = buf;
- INIT_WORK(&event_node->work, virtscsi_handle_event);
- schedule_work(&event_node->work);
+ if (!vscsi->stop_events)
+ queue_work(system_freezable_wq, &event_node->work);
}
static void virtscsi_event_done(struct virtqueue *vq)
@@ -424,20 +425,12 @@ static void virtscsi_event_done(struct virtqueue *vq)
virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
};
-/**
- * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
- * @vq : the struct virtqueue we're talking about
- * @cmd : command structure
- * @req_size : size of the request buffer
- * @resp_size : size of the response buffer
- * @gfp : flags to use for memory allocations
- */
-static int virtscsi_add_cmd(struct virtqueue *vq,
+static int __virtscsi_add_cmd(struct virtqueue *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size, gfp_t gfp)
+ size_t req_size, size_t resp_size)
{
struct scsi_cmnd *sc = cmd->sc;
- struct scatterlist *sgs[4], req, resp;
+ struct scatterlist *sgs[6], req, resp;
struct sg_table *out, *in;
unsigned out_num = 0, in_num = 0;
@@ -445,9 +438,9 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
if (sc && sc->sc_data_direction != DMA_NONE) {
if (sc->sc_data_direction != DMA_FROM_DEVICE)
- out = &scsi_out(sc)->table;
+ out = &sc->sdb.table;
if (sc->sc_data_direction != DMA_TO_DEVICE)
- in = &scsi_in(sc)->table;
+ in = &sc->sdb.table;
}
/* Request header. */
@@ -455,31 +448,61 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
sgs[out_num++] = &req;
/* Data-out buffer. */
- if (out)
+ if (out) {
+ /* Place WRITE protection SGLs before Data OUT payload */
+ if (scsi_prot_sg_count(sc))
+ sgs[out_num++] = scsi_prot_sglist(sc);
sgs[out_num++] = out->sgl;
+ }
/* Response header. */
sg_init_one(&resp, &cmd->resp, resp_size);
sgs[out_num + in_num++] = &resp;
/* Data-in buffer */
- if (in)
+ if (in) {
+ /* Place READ protection SGLs before Data IN payload */
+ if (scsi_prot_sg_count(sc))
+ sgs[out_num + in_num++] = scsi_prot_sglist(sc);
sgs[out_num + in_num++] = in->sgl;
+ }
- return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
+ return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
-static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
+static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
+{
+ bool needs_kick;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vq->vq_lock, flags);
+ needs_kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irqrestore(&vq->vq_lock, flags);
+
+ if (needs_kick)
+ virtqueue_notify(vq->vq);
+}
+
+/**
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
+ * @vq : the struct virtqueue we're talking about
+ * @cmd : command structure
+ * @req_size : size of the request buffer
+ * @resp_size : size of the response buffer
+ * @kick : whether to kick the virtqueue immediately
+ */
+static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size, gfp_t gfp)
+ size_t req_size, size_t resp_size,
+ bool kick)
{
unsigned long flags;
int err;
bool needs_kick = false;
spin_lock_irqsave(&vq->vq_lock, flags);
- err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
- if (!err)
+ err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
+ if (!err && kick)
needs_kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->vq_lock, flags);
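
Note (not part of the diff): with VIRTIO_SCSI_F_T10_PI negotiated,
__virtscsi_add_cmd() above can chain up to six scatterlists, which is why
sgs[] grew from 4 to 6 entries. A sketch of the resulting descriptor order
on the ring:

    /*
     * device-readable:  request header, WRITE PI sgl, data-out sgl
     * device-writable:  response header, READ PI sgl, data-in sgl
     */
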
@@ -489,101 +512,102 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
return err;
}
-static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
- struct virtio_scsi_vq *req_vq,
+static void virtio_scsi_init_hdr(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req *cmd,
struct scsi_cmnd *sc)
{
- struct virtio_scsi_cmd *cmd;
- int ret;
-
- struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
- BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
-
- /* TODO: check feature bit and fail if unsupported? */
- BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
-
- dev_dbg(&sc->device->sdev_gendev,
- "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
+ cmd->lun[0] = 1;
+ cmd->lun[1] = sc->device->id;
+ cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
+ cmd->lun[3] = sc->device->lun & 0xff;
+ cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
+ cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
+ cmd->prio = 0;
+ cmd->crn = 0;
+}
- ret = SCSI_MLQUEUE_HOST_BUSY;
- cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
- if (!cmd)
- goto out;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req_pi *cmd_pi,
+ struct scsi_cmnd *sc)
+{
+ struct request *rq = scsi_cmd_to_rq(sc);
+ struct blk_integrity *bi;
- memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
- cmd->req.cmd = (struct virtio_scsi_cmd_req){
- .lun[0] = 1,
- .lun[1] = sc->device->id,
- .lun[2] = (sc->device->lun >> 8) | 0x40,
- .lun[3] = sc->device->lun & 0xff,
- .tag = (unsigned long)sc,
- .task_attr = VIRTIO_SCSI_S_SIMPLE,
- .prio = 0,
- .crn = 0,
- };
+ virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
- BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
- memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+ if (!rq || !scsi_prot_sg_count(sc))
+ return;
- if (virtscsi_kick_cmd(req_vq, cmd,
- sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
- GFP_ATOMIC) == 0)
- ret = 0;
- else
- mempool_free(cmd, virtscsi_cmd_pool);
+ bi = blk_get_integrity(rq->q->disk);
-out:
- return ret;
+ if (sc->sc_data_direction == DMA_TO_DEVICE)
+ cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
+ else if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
}
+#endif
-static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
- struct scsi_cmnd *sc)
+static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
+ struct scsi_cmnd *sc)
{
- struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag);
- atomic_inc(&tgt->reqs);
- return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
+ return &vscsi->req_vqs[hwq];
}
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
- struct virtio_scsi_target_state *tgt)
+static int virtscsi_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *sc)
{
- struct virtio_scsi_vq *vq;
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
+ struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ bool kick;
unsigned long flags;
- u32 queue_num;
+ int req_size;
+ int ret;
- spin_lock_irqsave(&tgt->tgt_lock, flags);
+ BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
- /*
- * The memory barrier after atomic_inc_return matches
- * the smp_read_barrier_depends() in virtscsi_req_done.
- */
- if (atomic_inc_return(&tgt->reqs) > 1)
- vq = ACCESS_ONCE(tgt->req_vq);
- else {
- queue_num = smp_processor_id();
- while (unlikely(queue_num >= vscsi->num_queues))
- queue_num -= vscsi->num_queues;
-
- tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- }
+ /* TODO: check feature bit and fail if unsupported? */
+ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
- spin_unlock_irqrestore(&tgt->tgt_lock, flags);
- return vq;
-}
+ dev_dbg(&sc->device->sdev_gendev,
+ "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
-static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
- struct scsi_cmnd *sc)
-{
- struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
- struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);
+ cmd->sc = sc;
- return virtscsi_queuecommand(vscsi, req_vq, sc);
+ BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
+ virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
+ memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
+ req_size = sizeof(cmd->req.cmd_pi);
+ } else
+#endif
+ {
+ virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
+ memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+ req_size = sizeof(cmd->req.cmd);
+ }
+
+ kick = (sc->flags & SCMD_LAST) != 0;
+ ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
+ if (ret == -EIO) {
+ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ spin_lock_irqsave(&req_vq->vq_lock, flags);
+ virtscsi_complete_cmd(vscsi, cmd);
+ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+ } else if (ret != 0) {
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ return 0;
}
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
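
Note (not part of the diff): virtscsi_pick_vq_mq() above replaces the old
per-target queue steering with the block layer's own hctx mapping. A minimal
sketch of how the hardware queue index is recovered, assuming the helpers
from linux/blk-mq.h: blk_mq_unique_tag() packs the hctx number in the upper
16 bits and the per-queue tag in the lower 16 bits.

    #include <linux/blk-mq.h>

    static u16 example_hwq_of(struct request *rq)
    {
    	u32 unique = blk_mq_unique_tag(rq);		/* (hwq << 16) | tag */

    	return blk_mq_unique_tag_to_hwq(unique);	/* unique >> 16 */
    }
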
@@ -592,9 +616,8 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
int ret = FAILED;
cmd->comp = &comp;
- if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
- sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
- GFP_NOIO) < 0)
+ if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
goto out;
wait_for_completion(&comp);
@@ -602,6 +625,17 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
ret = SUCCESS;
+ /*
+ * The spec guarantees that all requests related to the TMF have
+ * been completed, but the callback might not have run yet if
+ * we're using independent interrupts (e.g. MSI). Poll the
+ * virtqueues once.
+ *
+ * In the abort case, scsi_done() will do nothing, because the
+ * command timed out and hence SCMD_STATE_COMPLETE has been set.
+ */
+ virtscsi_poll_requests(vscsi);
+
out:
mempool_free(cmd, virtscsi_cmd_pool);
return ret;
@@ -618,10 +652,10 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
- .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
+ .subtype = cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
.lun[0] = 1,
.lun[1] = sc->device->id,
.lun[2] = (sc->device->lun >> 8) | 0x40,
@@ -630,6 +664,41 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
+static int virtscsi_device_alloc(struct scsi_device *sdevice)
+{
+ /*
+ * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
+ * may have transfer limits which come from the host SCSI
+ * controller or something on the host side other than the
+ * target itself.
+ *
+ * To make this work properly, the hypervisor can adjust the
+ * target's VPD information to advertise these limits. But
+ * for that to work, the guest has to look at the VPD pages,
+ * which we won't do by default if it is an SPC-2 device, even
+ * if it does actually support it.
+ *
+ * So, set the blist to always try to read the VPD pages.
+ */
+ sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
+
+ return 0;
+}
+
+
+/**
+ * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
+ * @sdev: Virtscsi target whose queue depth to change
+ * @qdepth: New queue depth
+ */
+static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth = shost->cmd_per_lun;
+
+ return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
+}
+
static int virtscsi_abort(struct scsi_cmnd *sc)
{
struct virtio_scsi *vscsi = shost_priv(sc->device->host);
@@ -641,7 +710,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
@@ -649,138 +717,108 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
.lun[1] = sc->device->id,
.lun[2] = (sc->device->lun >> 8) | 0x40,
.lun[3] = sc->device->lun & 0xff,
- .tag = (unsigned long)sc,
+ .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
};
return virtscsi_tmf(vscsi, cmd);
}
-static int virtscsi_target_alloc(struct scsi_target *starget)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
{
- struct virtio_scsi_target_state *tgt =
- kmalloc(sizeof(*tgt), GFP_KERNEL);
- if (!tgt)
- return -ENOMEM;
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ int i, qoff;
- spin_lock_init(&tgt->tgt_lock);
- atomic_set(&tgt->reqs, 0);
- tgt->req_vq = NULL;
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
- starget->hostdata = tgt;
- return 0;
+ map->nr_queues = vscsi->io_queues[i];
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+
+ if (map->nr_queues == 0)
+ continue;
+
+ /*
+ * Regular queues have interrupts and hence CPU affinity is
+ * defined by the core virtio code, but polling queues have
+ * no interrupts so we let the block layer assign CPU affinity.
+ */
+ if (i == HCTX_TYPE_POLL)
+ blk_mq_map_queues(map);
+ else
+ blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
+ }
}
-static void virtscsi_target_destroy(struct scsi_target *starget)
+static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
- struct virtio_scsi_target_state *tgt = starget->hostdata;
- kfree(tgt);
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
+ unsigned long flags;
+ unsigned int len;
+ int found = 0;
+ void *buf;
+
+ spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
+
+ while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
+ virtscsi_complete_cmd(vscsi, buf);
+ found++;
+ }
+
+ spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
+
+ return found;
}
-static struct scsi_host_template virtscsi_host_template_single = {
- .module = THIS_MODULE,
- .name = "Virtio SCSI HBA",
- .proc_name = "virtio_scsi",
- .this_id = -1,
- .queuecommand = virtscsi_queuecommand_single,
- .eh_abort_handler = virtscsi_abort,
- .eh_device_reset_handler = virtscsi_device_reset,
+static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
+{
+ struct virtio_scsi *vscsi = shost_priv(shost);
- .can_queue = 1024,
- .dma_boundary = UINT_MAX,
- .use_clustering = ENABLE_CLUSTERING,
- .target_alloc = virtscsi_target_alloc,
- .target_destroy = virtscsi_target_destroy,
-};
+ virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
+}
-static struct scsi_host_template virtscsi_host_template_multi = {
+/*
+ * The host guarantees to respond to each command, although I/O
+ * latencies might be higher than on bare metal. Reset the timer
+ * unconditionally to give the host a chance to perform EH.
+ */
+static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
+{
+ return SCSI_EH_RESET_TIMER;
+}
+
+static const struct scsi_host_template virtscsi_host_template = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
.this_id = -1,
- .queuecommand = virtscsi_queuecommand_multi,
+ .cmd_size = sizeof(struct virtio_scsi_cmd),
+ .queuecommand = virtscsi_queuecommand,
+ .mq_poll = virtscsi_mq_poll,
+ .commit_rqs = virtscsi_commit_rqs,
+ .change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .eh_timed_out = virtscsi_eh_timed_out,
+ .sdev_init = virtscsi_device_alloc,
- .can_queue = 1024,
.dma_boundary = UINT_MAX,
- .use_clustering = ENABLE_CLUSTERING,
- .target_alloc = virtscsi_target_alloc,
- .target_destroy = virtscsi_target_destroy,
+ .map_queues = virtscsi_map_queues,
+ .track_queue_depth = 1,
};
#define virtscsi_config_get(vdev, fld) \
({ \
- typeof(((struct virtio_scsi_config *)0)->fld) __val; \
- vdev->config->get(vdev, \
- offsetof(struct virtio_scsi_config, fld), \
- &__val, sizeof(__val)); \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val; \
+ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
__val; \
})
#define virtscsi_config_set(vdev, fld, val) \
- (void)({ \
- typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
- vdev->config->set(vdev, \
- offsetof(struct virtio_scsi_config, fld), \
- &__val, sizeof(__val)); \
- })
-
-static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
-{
- int i;
- int cpu;
-
- /* In multiqueue mode, when the number of cpu is equal
- * to the number of request queues, we let the qeueues
- * to be private to one cpu by setting the affinity hint
- * to eliminate the contention.
- */
- if ((vscsi->num_queues == 1 ||
- vscsi->num_queues != num_online_cpus()) && affinity) {
- if (vscsi->affinity_hint_set)
- affinity = false;
- else
- return;
- }
-
- if (affinity) {
- i = 0;
- for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
- i++;
- }
-
- vscsi->affinity_hint_set = true;
- } else {
- for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
- virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
-
- vscsi->affinity_hint_set = false;
- }
-}
-
-static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
-{
- get_online_cpus();
- __virtscsi_set_affinity(vscsi, affinity);
- put_online_cpus();
-}
-
-static int virtscsi_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
- switch(action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- __virtscsi_set_affinity(vscsi, true);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
+ do { \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
+ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
+ } while(0)
static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
struct virtqueue *vq)
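
Note (not part of the diff): the config accessors rewritten above delegate
to the virtio_cread()/virtio_cwrite() helpers, which handle endian
conversion of config-space fields. Usage is unchanged, as seen elsewhere in
this diff:

    u32 num_queues = virtscsi_config_get(vdev, num_queues);
    virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
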
@@ -789,23 +827,10 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
virtscsi_vq->vq = vq;
}
-static void virtscsi_scan(struct virtio_device *vdev)
-{
- struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;
-
- scsi_scan_host(shost);
-}
-
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
- struct Scsi_Host *sh = virtio_scsi_host(vdev);
- struct virtio_scsi *vscsi = shost_priv(sh);
-
- virtscsi_set_affinity(vscsi, false);
-
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
-
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
@@ -814,32 +839,46 @@ static int virtscsi_init(struct virtio_device *vdev,
{
int err;
u32 i;
- u32 num_vqs;
- vq_callback_t **callbacks;
- const char **names;
+ u32 num_vqs, num_poll_vqs, num_req_vqs;
+ struct virtqueue_info *vqs_info;
struct virtqueue **vqs;
+ struct irq_affinity desc = { .pre_vectors = 2 };
- num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
- vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
- callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
- names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
+ num_req_vqs = vscsi->num_queues;
+ num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
+ vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
+ vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);
- if (!callbacks || !vqs || !names) {
+ if (!vqs || !vqs_info) {
err = -ENOMEM;
goto out;
}
- callbacks[0] = virtscsi_ctrl_done;
- callbacks[1] = virtscsi_event_done;
- names[0] = "control";
- names[1] = "event";
- for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
- callbacks[i] = virtscsi_req_done;
- names[i] = "request";
+ num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
+ num_req_vqs - 1);
+ vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
+ vscsi->io_queues[HCTX_TYPE_READ] = 0;
+ vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+ dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+ vscsi->io_queues[HCTX_TYPE_DEFAULT],
+ vscsi->io_queues[HCTX_TYPE_READ],
+ vscsi->io_queues[HCTX_TYPE_POLL]);
+
+ vqs_info[0].callback = virtscsi_ctrl_done;
+ vqs_info[0].name = "control";
+ vqs_info[1].callback = virtscsi_event_done;
+ vqs_info[1].name = "event";
+ for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
+ vqs_info[i].callback = virtscsi_req_done;
+ vqs_info[i].name = "request";
}
+ for (; i < num_vqs; i++)
+ vqs_info[i].name = "request_poll";
+
/* Discover virtqueues and write information to configuration. */
- err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
+ err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
if (err)
goto out;
@@ -849,19 +888,13 @@ static int virtscsi_init(struct virtio_device *vdev,
virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
vqs[i]);
- virtscsi_set_affinity(vscsi, true);
-
virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
- if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
- virtscsi_kick_event_all(vscsi);
-
err = 0;
out:
- kfree(names);
- kfree(callbacks);
+ kfree(vqs_info);
kfree(vqs);
if (err)
virtscsi_remove_vqs(vdev);
@@ -876,25 +909,28 @@ static int virtscsi_probe(struct virtio_device *vdev)
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
- struct scsi_host_template *hostt;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
/* We need to know how many queues before we allocate. */
num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+ num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
+ num_queues = blk_mq_num_possible_queues(num_queues);
num_targets = virtscsi_config_get(vdev, max_target) + 1;
- if (num_queues == 1)
- hostt = &virtscsi_host_template_single;
- else
- hostt = &virtscsi_host_template_multi;
-
- shost = scsi_host_alloc(hostt,
- sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
+ shost = scsi_host_alloc(&virtscsi_host_template,
+ struct_size(vscsi, req_vqs, num_queues));
if (!shost)
return -ENOMEM;
sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
shost->sg_tablesize = sg_elems;
+ shost->nr_maps = 1;
vscsi = shost_priv(shost);
vscsi->vdev = vdev;
vscsi->num_queues = num_queues;
@@ -904,12 +940,10 @@ static int virtscsi_probe(struct virtio_device *vdev)
if (err)
goto virtscsi_init_failed;
- vscsi->nb.notifier_call = &virtscsi_cpu_callback;
- err = register_hotcpu_notifier(&vscsi->nb);
- if (err) {
- pr_err("registering cpu notifier failed\n");
- goto scsi_add_host_failed;
- }
+ if (vscsi->io_queues[HCTX_TYPE_POLL])
+ shost->nr_maps = HCTX_TYPE_POLL + 1;
+
+ shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
@@ -922,13 +956,31 @@ static int virtscsi_probe(struct virtio_device *vdev)
shost->max_id = num_targets;
shost->max_channel = 0;
shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+ shost->nr_hw_queues = num_queues;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+ int host_prot;
+
+ host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+ scsi_host_set_prot(shost, host_prot);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ }
+#endif
+
err = scsi_add_host(shost, &vdev->dev);
if (err)
goto scsi_add_host_failed;
- /*
- * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
- * after VIRTIO_CONFIG_S_DRIVER_OK has been set..
- */
+
+ virtio_device_ready(vdev);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_kick_event_all(vscsi);
+
+ scsi_scan_host(shost);
return 0;
scsi_add_host_failed:
@@ -947,14 +999,11 @@ static void virtscsi_remove(struct virtio_device *vdev)
virtscsi_cancel_event_work(vscsi);
scsi_remove_host(shost);
-
- unregister_hotcpu_notifier(&vscsi->nb);
-
virtscsi_remove_vqs(vdev);
scsi_host_put(shost);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
virtscsi_remove_vqs(vdev);
@@ -965,8 +1014,18 @@ static int virtscsi_restore(struct virtio_device *vdev)
{
struct Scsi_Host *sh = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
+ int err;
+
+ err = virtscsi_init(vdev, vscsi);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_kick_event_all(vscsi);
- return virtscsi_init(vdev, vscsi);
+ return err;
}
#endif
@@ -978,24 +1037,25 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_SCSI_F_CHANGE,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ VIRTIO_SCSI_F_T10_PI,
+#endif
};
static struct virtio_driver virtio_scsi_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtscsi_probe,
- .scan = virtscsi_scan,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtscsi_freeze,
.restore = virtscsi_restore,
#endif
.remove = virtscsi_remove,
};
-static int __init init(void)
+static int __init virtio_scsi_init(void)
{
int ret = -ENOMEM;
@@ -1020,25 +1080,21 @@ static int __init init(void)
return 0;
error:
- if (virtscsi_cmd_pool) {
- mempool_destroy(virtscsi_cmd_pool);
- virtscsi_cmd_pool = NULL;
- }
- if (virtscsi_cmd_cache) {
- kmem_cache_destroy(virtscsi_cmd_cache);
- virtscsi_cmd_cache = NULL;
- }
+ mempool_destroy(virtscsi_cmd_pool);
+ virtscsi_cmd_pool = NULL;
+ kmem_cache_destroy(virtscsi_cmd_cache);
+ virtscsi_cmd_cache = NULL;
return ret;
}
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");