Diffstat (limited to 'drivers/vhost/scsi.c')
-rw-r--r--  drivers/vhost/scsi.c | 1193
1 file changed, 839 insertions, 354 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index dca6346d75b3..f43c1fe9fad9 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -25,7 +25,9 @@ #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/miscdevice.h> -#include <asm/unaligned.h> +#include <linux/blk_types.h> +#include <linux/bio.h> +#include <linux/unaligned.h> #include <scsi/scsi_common.h> #include <scsi/scsi_proto.h> #include <target/target_core_base.h> @@ -43,6 +45,55 @@ #define VHOST_SCSI_PREALLOC_SGLS 2048 #define VHOST_SCSI_PREALLOC_UPAGES 2048 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048 +/* + * For the legacy descriptor case we allocate an iov per byte in the + * virtio_scsi_cmd_resp struct. + */ +#define VHOST_SCSI_MAX_RESP_IOVS sizeof(struct virtio_scsi_cmd_resp) + +static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS; + +#ifdef CONFIG_ARCH_NO_SG_CHAIN +static int vhost_scsi_set_inline_sg_cnt(const char *buf, + const struct kernel_param *kp) +{ + pr_err("Setting inline_sg_cnt is not supported.\n"); + return -EOPNOTSUPP; +} +#else +static int vhost_scsi_set_inline_sg_cnt(const char *buf, + const struct kernel_param *kp) +{ + unsigned int cnt; + int ret; + + ret = kstrtouint(buf, 10, &cnt); + if (ret) + return ret; + + if (cnt > VHOST_SCSI_PREALLOC_SGLS) { + pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS); + return -EINVAL; + } + + vhost_scsi_inline_sg_cnt = cnt; + return 0; +} +#endif + +static int vhost_scsi_get_inline_sg_cnt(char *buf, + const struct kernel_param *kp) +{ + return sprintf(buf, "%u\n", vhost_scsi_inline_sg_cnt); +} + +static const struct kernel_param_ops vhost_scsi_inline_sg_cnt_op = { + .get = vhost_scsi_get_inline_sg_cnt, + .set = vhost_scsi_set_inline_sg_cnt, +}; + +module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644); +MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048."); /* Max number of requests before requeueing the job. 
* Using this limit prevents one virtqueue from starving others with @@ -60,39 +111,33 @@ struct vhost_scsi_inflight { struct vhost_scsi_cmd { /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ int tvc_vq_desc; - /* virtio-scsi initiator task attribute */ - int tvc_task_attr; - /* virtio-scsi response incoming iovecs */ - int tvc_in_iovs; - /* virtio-scsi initiator data direction */ - enum dma_data_direction tvc_data_direction; - /* Expected data transfer length from virtio-scsi header */ - u32 tvc_exp_data_len; - /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ - u64 tvc_tag; /* The number of scatterlists associated with this cmd */ u32 tvc_sgl_count; u32 tvc_prot_sgl_count; - /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */ - u32 tvc_lun; - /* Pointer to the SGL formatted memory from virtio-scsi */ - struct scatterlist *tvc_sgl; - struct scatterlist *tvc_prot_sgl; - struct page **tvc_upages; - /* Pointer to response header iovec */ + u32 copied_iov:1; + const void *read_iov; + struct iov_iter *read_iter; + struct scatterlist *sgl; + struct sg_table table; + struct scatterlist *prot_sgl; + struct sg_table prot_table; + /* Fast path response header iovec used when only one vec is needed */ struct iovec tvc_resp_iov; - /* Pointer to vhost_scsi for our device */ - struct vhost_scsi *tvc_vhost; + /* Number of iovs for response */ + unsigned int tvc_resp_iovs_cnt; + /* Pointer to response header iovecs if more than one is needed */ + struct iovec *tvc_resp_iovs; /* Pointer to vhost_virtqueue for the cmd */ struct vhost_virtqueue *tvc_vq; - /* Pointer to vhost nexus memory */ - struct vhost_scsi_nexus *tvc_nexus; /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd tvc_se_cmd; - /* Copy of the incoming SCSI command descriptor block (CDB) */ - unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE]; /* Sense buffer that will be mapped into outgoing status */ unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; + /* + * Dirty write descriptors of this command. + */ + struct vhost_log *tvc_log; + unsigned int tvc_log_num; /* Completed commands list, serviced from vhost worker thread */ struct llist_node tvc_completion_list; /* Used to track inflight cmd */ @@ -107,7 +152,7 @@ struct vhost_scsi_nexus { struct vhost_scsi_tpg { /* Vhost port target portal group tag for TCM */ u16 tport_tpgt; - /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ + /* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */ int tv_tpg_port_count; /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ int tv_tpg_vhost_count; @@ -125,7 +170,6 @@ struct vhost_scsi_tpg { struct se_portal_group se_tpg; /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ struct vhost_scsi *vhost_scsi; - struct list_head tmf_queue; }; struct vhost_scsi_tport { @@ -153,11 +197,14 @@ enum { }; /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. 
*/ -enum { - VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | - (1ULL << VIRTIO_SCSI_F_T10_PI) +static const int vhost_scsi_bits[] = { + VHOST_FEATURES, + VIRTIO_SCSI_F_HOTPLUG, + VIRTIO_SCSI_F_T10_PI }; +#define VHOST_SCSI_FEATURES VHOST_FEATURES_U64(vhost_scsi_bits, 0) + #define VHOST_SCSI_MAX_TARGET 256 #define VHOST_SCSI_MAX_IO_VQ 1024 #define VHOST_SCSI_MAX_EVENT 128 @@ -168,6 +215,7 @@ MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi d struct vhost_scsi_virtqueue { struct vhost_virtqueue vq; + struct vhost_scsi *vs; /* * Reference counting for inflight reqs, used for flush operation. At * each time, one reference tracks new commands submitted, while we @@ -182,6 +230,10 @@ struct vhost_scsi_virtqueue { struct vhost_scsi_cmd *scsi_cmds; struct sbitmap scsi_tags; int max_cmds; + struct page **upages; + + struct vhost_work completion_work; + struct llist_head completion_list; }; struct vhost_scsi { @@ -191,25 +243,22 @@ struct vhost_scsi { struct vhost_dev dev; struct vhost_scsi_virtqueue *vqs; - unsigned long *compl_bitmap; struct vhost_scsi_inflight **old_inflight; - struct vhost_work vs_completion_work; /* cmd completion work item */ - struct llist_head vs_completion_list; /* cmd completion queue */ - struct vhost_work vs_event_work; /* evt injection work item */ struct llist_head vs_event_list; /* evt injection queue */ bool vs_events_missed; /* any missed events, protected by vq->mutex */ int vs_events_nr; /* num of pending events, protected by vq->mutex */ + + unsigned int inline_sg_cnt; }; struct vhost_scsi_tmf { struct vhost_work vwork; - struct vhost_scsi_tpg *tpg; + struct work_struct flush_work; struct vhost_scsi *vhost; struct vhost_scsi_virtqueue *svq; - struct list_head queue_entry; struct se_cmd se_cmd; u8 scsi_resp; @@ -217,6 +266,12 @@ struct vhost_scsi_tmf { struct iovec resp_iov; int in_iovs; int vq_desc; + + /* + * Dirty write descriptors of this command. + */ + struct vhost_log *tmf_log; + unsigned int tmf_log_num; }; /* @@ -232,7 +287,10 @@ struct vhost_scsi_ctx { struct iov_iter out_iter; }; -/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ +/* + * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO + * configfs management operations. 
+ */ static DEFINE_MUTEX(vhost_scsi_mutex); static LIST_HEAD(vhost_scsi_list); @@ -256,12 +314,12 @@ static void vhost_scsi_init_inflight(struct vhost_scsi *vs, mutex_lock(&vq->mutex); - /* store old infight */ + /* store old inflight */ idx = vs->vqs[i].inflight_idx; if (old_inflight) old_inflight[i] = &vs->vqs[i].inflights[idx]; - /* setup new infight */ + /* setup new inflight */ vs->vqs[i].inflight_idx = idx ^ 1; new_inflight = &vs->vqs[i].inflights[idx ^ 1]; kref_init(&new_inflight->kref); @@ -294,11 +352,6 @@ static int vhost_scsi_check_true(struct se_portal_group *se_tpg) return 1; } -static int vhost_scsi_check_false(struct se_portal_group *se_tpg) -{ - return 0; -} - static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) { struct vhost_scsi_tpg *tpg = container_of(se_tpg, @@ -323,9 +376,43 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) return tpg->tv_fabric_prot_type; } -static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) +static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq, + struct vhost_scsi_cmd *cmd, + struct vhost_log *log, + unsigned int log_num) { - return 1; + if (!cmd->tvc_log) + cmd->tvc_log = kmalloc_array(vq->dev->iov_limit, + sizeof(*cmd->tvc_log), + GFP_KERNEL); + + if (unlikely(!cmd->tvc_log)) { + vq_err(vq, "Failed to alloc tvc_log\n"); + return -ENOMEM; + } + + memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num); + cmd->tvc_log_num = log_num; + + return 0; +} + +static void vhost_scsi_log_write(struct vhost_virtqueue *vq, + struct vhost_log *log, + unsigned int log_num) +{ + if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL))) + return; + + if (likely(!log_num || !log)) + return; + + /* + * vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM. + * No requirement for vq->iotlb case. 
+ */ + WARN_ON_ONCE(unlikely(vq->iotlb)); + vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0); } static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd) @@ -334,55 +421,83 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd) struct vhost_scsi_cmd, tvc_se_cmd); struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); + struct vhost_scsi *vs = svq->vs; struct vhost_scsi_inflight *inflight = tv_cmd->inflight; + struct scatterlist *sg; + struct page *page; int i; if (tv_cmd->tvc_sgl_count) { - for (i = 0; i < tv_cmd->tvc_sgl_count; i++) - put_page(sg_page(&tv_cmd->tvc_sgl[i])); + for_each_sgtable_sg(&tv_cmd->table, sg, i) { + page = sg_page(sg); + if (!page) + continue; + + if (tv_cmd->copied_iov) + __free_page(page); + else + put_page(page); + } + kfree(tv_cmd->read_iter); + kfree(tv_cmd->read_iov); + sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt); } if (tv_cmd->tvc_prot_sgl_count) { - for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++) - put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); + for_each_sgtable_sg(&tv_cmd->prot_table, sg, i) { + page = sg_page(sg); + if (page) + put_page(page); + } + sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt); } + if (tv_cmd->tvc_resp_iovs != &tv_cmd->tvc_resp_iov) + kfree(tv_cmd->tvc_resp_iovs); sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag); vhost_scsi_put_inflight(inflight); } static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf) { - struct vhost_scsi_tpg *tpg = tmf->tpg; struct vhost_scsi_inflight *inflight = tmf->inflight; - mutex_lock(&tpg->tv_tpg_mutex); - list_add_tail(&tpg->tmf_queue, &tmf->queue_entry); - mutex_unlock(&tpg->tv_tpg_mutex); + /* + * tmf->tmf_log is default NULL unless VHOST_F_LOG_ALL is set. + */ + kfree(tmf->tmf_log); + kfree(tmf); vhost_scsi_put_inflight(inflight); } +static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq) +{ + struct vhost_scsi_cmd *cmd, *t; + struct llist_node *llnode; + + llnode = llist_del_all(&svq->completion_list); + llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) + vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); +} + static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) { if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf, se_cmd); - vhost_work_queue(&tmf->vhost->dev, &tmf->vwork); + schedule_work(&tmf->flush_work); } else { struct vhost_scsi_cmd *cmd = container_of(se_cmd, struct vhost_scsi_cmd, tvc_se_cmd); - struct vhost_scsi *vs = cmd->tvc_vhost; + struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq, + struct vhost_scsi_virtqueue, vq); - llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); - vhost_work_queue(&vs->dev, &vs->vs_completion_work); + llist_add(&cmd->tvc_completion_list, &svq->completion_list); + if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work)) + vhost_scsi_drop_cmds(svq); } } -static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) -{ - return 0; -} - static int vhost_scsi_write_pending(struct se_cmd *se_cmd) { /* Go ahead and process the write immediately */ @@ -390,16 +505,6 @@ static int vhost_scsi_write_pending(struct se_cmd *se_cmd) return 0; } -static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl) -{ - return; -} - -static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) -{ - return 0; -} - static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd) { transport_generic_free_cmd(se_cmd, 0); @@ -469,6 +574,8 @@ vhost_scsi_do_evt_work(struct 
vhost_scsi *vs, struct vhost_scsi_evt *evt) struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; struct virtio_scsi_event *event = &evt->event; struct virtio_scsi_event __user *eventp; + struct vhost_log *vq_log; + unsigned int log_num; unsigned out, in; int head, ret; @@ -479,9 +586,19 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) again: vhost_disable_notify(&vs->dev, vq); + + vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? + vq->log : NULL; + + /* + * Reset 'log_num' since vhost_get_vq_desc() may reset it only + * after certain condition checks. + */ + log_num = 0; + head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, - NULL, NULL); + vq_log, &log_num); if (head < 0) { vs->vs_events_missed = true; return; @@ -511,12 +628,12 @@ again: vhost_add_used_and_signal(&vs->dev, vq, head, 0); else vq_err(vq, "Faulted on vhost_scsi_send_event\n"); + + vhost_scsi_log_write(vq, vq_log, log_num); } -static void vhost_scsi_evt_work(struct vhost_work *work) +static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop) { - struct vhost_scsi *vs = container_of(work, struct vhost_scsi, - vs_event_work); struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; struct vhost_scsi_evt *evt, *t; struct llist_node *llnode; @@ -524,12 +641,45 @@ static void vhost_scsi_evt_work(struct vhost_work *work) mutex_lock(&vq->mutex); llnode = llist_del_all(&vs->vs_event_list); llist_for_each_entry_safe(evt, t, llnode, list) { - vhost_scsi_do_evt_work(vs, evt); + if (!drop) + vhost_scsi_do_evt_work(vs, evt); vhost_scsi_free_evt(vs, evt); } mutex_unlock(&vq->mutex); } +static void vhost_scsi_evt_work(struct vhost_work *work) +{ + struct vhost_scsi *vs = container_of(work, struct vhost_scsi, + vs_event_work); + vhost_scsi_complete_events(vs, false); +} + +static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd) +{ + struct iov_iter *iter = cmd->read_iter; + struct scatterlist *sg; + struct page *page; + size_t len; + int i; + + for_each_sgtable_sg(&cmd->table, sg, i) { + page = sg_page(sg); + if (!page) + continue; + + len = sg->length; + + if (copy_page_to_iter(page, 0, len, iter) != len) { + pr_err("Could not copy data while handling misaligned cmd. Error %zu\n", + len); + return -1; + } + } + + return 0; +} + /* Fill in status and signal that we are done processing this command * * This is scheduled in the vhost work queue so we are called with the owner @@ -537,100 +687,113 @@ static void vhost_scsi_evt_work(struct vhost_work *work) */ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) { - struct vhost_scsi *vs = container_of(work, struct vhost_scsi, - vs_completion_work); + struct vhost_scsi_virtqueue *svq = container_of(work, + struct vhost_scsi_virtqueue, completion_work); struct virtio_scsi_cmd_resp v_rsp; struct vhost_scsi_cmd *cmd, *t; struct llist_node *llnode; struct se_cmd *se_cmd; struct iov_iter iov_iter; - int ret, vq; + bool signal = false; + int ret; + + llnode = llist_del_all(&svq->completion_list); + + mutex_lock(&svq->vq.mutex); - bitmap_zero(vs->compl_bitmap, vs->dev.nvqs); - llnode = llist_del_all(&vs->vs_completion_list); llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) { se_cmd = &cmd->tvc_se_cmd; pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, cmd, se_cmd->residual_count, se_cmd->scsi_status); - memset(&v_rsp, 0, sizeof(v_rsp)); - v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count); - /* TODO is status_qualifier field needed? 
*/ - v_rsp.status = se_cmd->scsi_status; - v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, - se_cmd->scsi_sense_length); - memcpy(v_rsp.sense, cmd->tvc_sense_buf, - se_cmd->scsi_sense_length); - - iov_iter_init(&iov_iter, ITER_DEST, &cmd->tvc_resp_iov, - cmd->tvc_in_iovs, sizeof(v_rsp)); + + if (cmd->read_iter && vhost_scsi_copy_sgl_to_iov(cmd)) { + v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET; + } else { + v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, + se_cmd->residual_count); + /* TODO is status_qualifier field needed? */ + v_rsp.status = se_cmd->scsi_status; + v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, + se_cmd->scsi_sense_length); + memcpy(v_rsp.sense, cmd->tvc_sense_buf, + se_cmd->scsi_sense_length); + } + + iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iovs, + cmd->tvc_resp_iovs_cnt, sizeof(v_rsp)); ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); if (likely(ret == sizeof(v_rsp))) { - struct vhost_scsi_virtqueue *q; + signal = true; + vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); - q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); - vq = q - vs->vqs; - __set_bit(vq, vs->compl_bitmap); } else pr_err("Faulted on virtio_scsi_cmd_resp\n"); + vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log, + cmd->tvc_log_num); + vhost_scsi_release_cmd_res(se_cmd); } - vq = -1; - while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1)) - < vs->dev.nvqs) - vhost_signal(&vs->dev, &vs->vqs[vq].vq); + mutex_unlock(&svq->vq.mutex); + + if (signal) + vhost_signal(&svq->vs->dev, &svq->vq); } static struct vhost_scsi_cmd * -vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, - unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, - u32 exp_data_len, int data_direction) +vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag) { struct vhost_scsi_virtqueue *svq = container_of(vq, struct vhost_scsi_virtqueue, vq); struct vhost_scsi_cmd *cmd; - struct vhost_scsi_nexus *tv_nexus; - struct scatterlist *sg, *prot_sg; - struct page **pages; + struct scatterlist *sgl, *prot_sgl; + struct vhost_log *log; int tag; - tv_nexus = tpg->tpg_nexus; - if (!tv_nexus) { - pr_err("Unable to locate active struct vhost_scsi_nexus\n"); - return ERR_PTR(-EIO); - } - tag = sbitmap_get(&svq->scsi_tags); if (tag < 0) { - pr_err("Unable to obtain tag for vhost_scsi_cmd\n"); + pr_warn_once("Guest sent too many cmds. 
Returning TASK_SET_FULL.\n"); return ERR_PTR(-ENOMEM); } cmd = &svq->scsi_cmds[tag]; - sg = cmd->tvc_sgl; - prot_sg = cmd->tvc_prot_sgl; - pages = cmd->tvc_upages; + sgl = cmd->sgl; + prot_sgl = cmd->prot_sgl; + log = cmd->tvc_log; memset(cmd, 0, sizeof(*cmd)); - cmd->tvc_sgl = sg; - cmd->tvc_prot_sgl = prot_sg; - cmd->tvc_upages = pages; + cmd->sgl = sgl; + cmd->prot_sgl = prot_sgl; + cmd->tvc_log = log; cmd->tvc_se_cmd.map_tag = tag; - cmd->tvc_tag = scsi_tag; - cmd->tvc_lun = lun; - cmd->tvc_task_attr = task_attr; - cmd->tvc_exp_data_len = exp_data_len; - cmd->tvc_data_direction = data_direction; - cmd->tvc_nexus = tv_nexus; cmd->inflight = vhost_scsi_get_inflight(vq); - memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE); - return cmd; } +static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter, + struct scatterlist *curr, + struct scatterlist *end) +{ + size_t revert_bytes = 0; + struct page *page; + + while (curr != end) { + page = sg_page(curr); + + if (page) { + put_page(page); + revert_bytes += curr->length; + } + /* Clear so we can re-use it for the copy path */ + sg_set_page(curr, NULL, 0, 0); + curr = sg_next(curr); + } + iov_iter_revert(iter, revert_bytes); +} + /* * Map a user memory range into a scatterlist * @@ -639,14 +802,17 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, static int vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, - struct scatterlist *sgl, - bool write) + struct sg_table *sg_table, + struct scatterlist **sgl, + bool is_prot) { - struct page **pages = cmd->tvc_upages; - struct scatterlist *sg = sgl; + struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq, + struct vhost_scsi_virtqueue, vq); + struct page **pages = svq->upages; + struct scatterlist *sg = *sgl; ssize_t bytes; size_t offset; - unsigned int npages = 0; + unsigned int n, npages = 0; bytes = iov_iter_get_pages2(iter, pages, LONG_MAX, VHOST_SCSI_PREALLOC_UPAGES, &offset); @@ -655,12 +821,48 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, return bytes < 0 ? bytes : -EFAULT; while (bytes) { - unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes); - sg_set_page(sg++, pages[npages++], n, offset); + n = min_t(unsigned int, PAGE_SIZE - offset, bytes); + /* + * The block layer requires bios/requests to be a multiple of + * 512 bytes, but Windows can send us vecs that are misaligned. + * This can result in bios and later requests with misaligned + * sizes if we have to break up a cmd/scatterlist into multiple + * bios. + * + * We currently only break up a command into multiple bios if + * we hit the vec/seg limit, so check if our sgl_count is + * greater than the max and if a vec in the cmd has a + * misaligned offset/size. + */ + if (!is_prot && + (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) && + cmd->tvc_sgl_count > BIO_MAX_VECS) { + WARN_ONCE(true, + "vhost-scsi detected misaligned IO. 
Performance may be degraded."); + goto revert_iter_get_pages; + } + + sg_set_page(sg, pages[npages++], n, offset); + sg = sg_next(sg); bytes -= n; offset = 0; } + + *sgl = sg; return npages; + +revert_iter_get_pages: + vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg); + + iov_iter_revert(iter, bytes); + while (bytes) { + n = min_t(unsigned int, PAGE_SIZE, bytes); + + put_page(pages[npages++]); + bytes -= n; + } + + return -EINVAL; } static int @@ -668,7 +870,7 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) { int sgl_count = 0; - if (!iter || !iter->iov) { + if (!iter || !iter_iov(iter)) { pr_err("%s: iter->iov is NULL, but expected bytes: %zu" " present\n", __func__, bytes); return -EINVAL; @@ -684,51 +886,111 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) } static int -vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, - struct iov_iter *iter, - struct scatterlist *sg, int sg_count) +vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + struct sg_table *sg_table, int sg_count, + int data_dir) +{ + size_t len = iov_iter_count(iter); + unsigned int nbytes = 0; + struct scatterlist *sg; + struct page *page; + int i, ret; + + if (data_dir == DMA_FROM_DEVICE) { + cmd->read_iter = kzalloc(sizeof(*cmd->read_iter), GFP_KERNEL); + if (!cmd->read_iter) + return -ENOMEM; + + cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL); + if (!cmd->read_iov) { + ret = -ENOMEM; + goto free_iter; + } + } + + for_each_sgtable_sg(sg_table, sg, i) { + page = alloc_page(GFP_KERNEL); + if (!page) { + ret = -ENOMEM; + goto err; + } + + nbytes = min_t(unsigned int, PAGE_SIZE, len); + sg_set_page(sg, page, nbytes, 0); + + if (data_dir == DMA_TO_DEVICE && + copy_page_from_iter(page, 0, nbytes, iter) != nbytes) { + ret = -EFAULT; + goto err; + } + + len -= nbytes; + } + + cmd->copied_iov = 1; + return 0; + +err: + pr_err("Could not read %u bytes while handling misaligned cmd\n", + nbytes); + + for_each_sgtable_sg(sg_table, sg, i) { + page = sg_page(sg); + if (page) + __free_page(page); + } + kfree(cmd->read_iov); +free_iter: + kfree(cmd->read_iter); + return ret; +} + +static int +vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + struct sg_table *sg_table, int sg_count, bool is_prot) { - struct scatterlist *p = sg; + struct scatterlist *sg = sg_table->sgl; int ret; while (iov_iter_count(iter)) { - ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write); + ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot); if (ret < 0) { - while (p < sg) { - struct page *page = sg_page(p++); - if (page) - put_page(page); - } + vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl, + sg); return ret; } - sg += ret; } + return 0; } static int -vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, +vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd, size_t prot_bytes, struct iov_iter *prot_iter, - size_t data_bytes, struct iov_iter *data_iter) + size_t data_bytes, struct iov_iter *data_iter, int data_dir) { int sgl_count, ret; - bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE); if (prot_bytes) { sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes, VHOST_SCSI_PREALLOC_PROT_SGLS); - if (sgl_count < 0) - return sgl_count; + cmd->prot_table.sgl = cmd->prot_sgl; + ret = sg_alloc_table_chained(&cmd->prot_table, sgl_count, + cmd->prot_table.sgl, + vs->inline_sg_cnt); + if (ret) + return ret; - sg_init_table(cmd->tvc_prot_sgl, sgl_count); cmd->tvc_prot_sgl_count = sgl_count; pr_debug("%s prot_sg %p 
prot_sgl_count %u\n", __func__, - cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count); + cmd->prot_table.sgl, cmd->tvc_prot_sgl_count); - ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter, - cmd->tvc_prot_sgl, - cmd->tvc_prot_sgl_count); + ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter, + &cmd->prot_table, + cmd->tvc_prot_sgl_count, true); if (ret < 0) { + sg_free_table_chained(&cmd->prot_table, + vs->inline_sg_cnt); cmd->tvc_prot_sgl_count = 0; return ret; } @@ -738,14 +1000,23 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, if (sgl_count < 0) return sgl_count; - sg_init_table(cmd->tvc_sgl, sgl_count); + cmd->table.sgl = cmd->sgl; + ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl, + vs->inline_sg_cnt); + if (ret) + return ret; + cmd->tvc_sgl_count = sgl_count; pr_debug("%s data_sg %p data_sgl_count %u\n", __func__, - cmd->tvc_sgl, cmd->tvc_sgl_count); + cmd->table.sgl, cmd->tvc_sgl_count); - ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter, - cmd->tvc_sgl, cmd->tvc_sgl_count); + ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table, + cmd->tvc_sgl_count, false); + if (ret == -EINVAL) + ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table, + cmd->tvc_sgl_count, data_dir); if (ret < 0) { + sg_free_table_chained(&cmd->table, vs->inline_sg_cnt); cmd->tvc_sgl_count = 0; return ret; } @@ -769,67 +1040,117 @@ static int vhost_scsi_to_tcm_attr(int attr) return TCM_SIMPLE_TAG; } -static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd) +static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus, + struct vhost_scsi_cmd *cmd, + unsigned char *cdb, u16 lun, + int task_attr, int data_dir, + u32 exp_data_len) { struct se_cmd *se_cmd = &cmd->tvc_se_cmd; - struct vhost_scsi_nexus *tv_nexus; struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; /* FIXME: BIDI operation */ if (cmd->tvc_sgl_count) { - sg_ptr = cmd->tvc_sgl; + sg_ptr = cmd->table.sgl; if (cmd->tvc_prot_sgl_count) - sg_prot_ptr = cmd->tvc_prot_sgl; + sg_prot_ptr = cmd->prot_table.sgl; else se_cmd->prot_pto = true; } else { sg_ptr = NULL; } - tv_nexus = cmd->tvc_nexus; se_cmd->tag = 0; - target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0], - cmd->tvc_lun, cmd->tvc_exp_data_len, - vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), - cmd->tvc_data_direction, TARGET_SCF_ACK_KREF); + target_init_cmd(se_cmd, nexus->tvn_se_sess, &cmd->tvc_sense_buf[0], + lun, exp_data_len, vhost_scsi_to_tcm_attr(task_attr), + data_dir, TARGET_SCF_ACK_KREF); - if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr, + if (target_submit_prep(se_cmd, cdb, sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count, GFP_KERNEL)) return; - target_queue_submission(se_cmd); + target_submit(se_cmd); } static void -vhost_scsi_send_bad_target(struct vhost_scsi *vs, - struct vhost_virtqueue *vq, - int head, unsigned out) +vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc, u8 status) { - struct virtio_scsi_cmd_resp __user *resp; struct virtio_scsi_cmd_resp rsp; + struct iov_iter iov_iter; int ret; memset(&rsp, 0, sizeof(rsp)); - rsp.response = VIRTIO_SCSI_S_BAD_TARGET; - resp = vq->iov[out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) - vhost_add_used_and_signal(&vs->dev, vq, head, 0); + rsp.status = status; + + iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, + sizeof(rsp)); + + ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); + + if (likely(ret == sizeof(rsp))) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 
else pr_err("Faulted on virtio_scsi_cmd_resp\n"); } +#define TYPE_IO_CMD 0 +#define TYPE_CTRL_TMF 1 +#define TYPE_CTRL_AN 2 + +static void +vhost_scsi_send_bad_target(struct vhost_scsi *vs, + struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc, int type) +{ + union { + struct virtio_scsi_cmd_resp cmd; + struct virtio_scsi_ctrl_tmf_resp tmf; + struct virtio_scsi_ctrl_an_resp an; + } rsp; + struct iov_iter iov_iter; + size_t rsp_size; + int ret; + + memset(&rsp, 0, sizeof(rsp)); + + if (type == TYPE_IO_CMD) { + rsp_size = sizeof(struct virtio_scsi_cmd_resp); + rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + } else if (type == TYPE_CTRL_TMF) { + rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp); + rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET; + } else { + rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp); + rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET; + } + + iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, + rsp_size); + + ret = copy_to_iter(&rsp, rsp_size, &iov_iter); + + if (likely(ret == rsp_size)) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); + else + pr_err("Faulted on virtio scsi type=%d\n", type); +} + static int vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq, - struct vhost_scsi_ctx *vc) + struct vhost_scsi_ctx *vc, + struct vhost_log *log, unsigned int *log_num) { int ret = -ENXIO; + if (likely(log_num)) + *log_num = 0; + vc->head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &vc->out, &vc->in, - NULL, NULL); + log, log_num); pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", vc->head, vc->out, vc->in); @@ -902,21 +1223,59 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, /* virtio-scsi spec requires byte 0 of the lun to be 1 */ vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp); } else { - struct vhost_scsi_tpg **vs_tpg, *tpg; + struct vhost_scsi_tpg **vs_tpg, *tpg = NULL; + + if (vc->target) { + /* validated at handler entry */ + vs_tpg = vhost_vq_get_backend(vq); + tpg = READ_ONCE(vs_tpg[*vc->target]); + if (unlikely(!tpg)) + goto out; + } + + if (tpgp) + *tpgp = tpg; + ret = 0; + } +out: + return ret; +} - vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */ +static int +vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs, + unsigned int in_iovs_cnt) +{ + int i, cnt; - tpg = READ_ONCE(vs_tpg[*vc->target]); - if (unlikely(!tpg)) { - vq_err(vq, "Target 0x%x does not exist\n", *vc->target); - } else { - if (tpgp) - *tpgp = tpg; - ret = 0; - } + if (!in_iovs_cnt) + return 0; + /* + * Initiators normally just put the virtio_scsi_cmd_resp in the first + * iov, but just in case they wedged in some data with it we check for + * greater than or equal to the response struct. + */ + if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) { + cmd->tvc_resp_iovs = &cmd->tvc_resp_iov; + cmd->tvc_resp_iovs_cnt = 1; + } else { + /* + * Legacy descriptor layouts didn't specify that we must put + * the entire response in one iov. Worst case we have a + * iov per byte. 
+ */ + cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt); + cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec), + GFP_KERNEL); + if (!cmd->tvc_resp_iovs) + return -ENOMEM; + + cmd->tvc_resp_iovs_cnt = cnt; } - return ret; + for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++) + cmd->tvc_resp_iovs[i] = in_iovs[i]; + + return 0; } static u16 vhost_buf_to_lun(u8 *lun_buf) @@ -930,6 +1289,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) struct vhost_scsi_tpg **vs_tpg, *tpg; struct virtio_scsi_cmd_req v_req; struct virtio_scsi_cmd_req_pi v_req_pi; + struct vhost_scsi_nexus *nexus; struct vhost_scsi_ctx vc; struct vhost_scsi_cmd *cmd; struct iov_iter in_iter, prot_iter, data_iter; @@ -939,7 +1299,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) u16 lun; u8 task_attr; bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); - void *cdb; + u8 *cdb; + struct vhost_log *vq_log; + unsigned int log_num; mutex_lock(&vq->mutex); /* @@ -955,8 +1317,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vhost_disable_notify(&vs->dev, vq); + vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? + vq->log : NULL; + do { - ret = vhost_scsi_get_desc(vs, vq, &vc); + ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num); if (ret) goto err; @@ -1048,7 +1413,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) /* * Set prot_iter to data_iter and truncate it to * prot_bytes, and advance data_iter past any - * preceeding prot_bytes that may be present. + * preceding prot_bytes that may be present. * * Also fix up the exp_data_len to reflect only the * actual data payload length. @@ -1082,28 +1447,47 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); goto err; } - cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr, - exp_data_len + prot_bytes, - data_direction); + + nexus = tpg->tpg_nexus; + if (!nexus) { + vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n"); + ret = -EIO; + goto err; + } + + cmd = vhost_scsi_get_cmd(vq, tag); if (IS_ERR(cmd)) { - vq_err(vq, "vhost_scsi_get_cmd failed %ld\n", - PTR_ERR(cmd)); + ret = PTR_ERR(cmd); + vq_err(vq, "vhost_scsi_get_tag failed %d\n", ret); goto err; } - cmd->tvc_vhost = vs; cmd->tvc_vq = vq; - cmd->tvc_resp_iov = vq->iov[vc.out]; - cmd->tvc_in_iovs = vc.in; + + ret = vhost_scsi_setup_resp_iovs(cmd, &vq->iov[vc.out], vc.in); + if (ret) { + vq_err(vq, "Failed to alloc recv iovs\n"); + vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); + goto err; + } + + if (unlikely(vq_log && log_num)) { + ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num); + if (unlikely(ret)) { + vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); + goto err; + } + } pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", - cmd->tvc_cdb[0], cmd->tvc_lun); + cdb[0], lun); pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:" " %d\n", cmd, exp_data_len, prot_bytes, data_direction); if (data_direction != DMA_NONE) { - if (unlikely(vhost_scsi_mapal(cmd, prot_bytes, - &prot_iter, exp_data_len, - &data_iter))) { + ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter, + exp_data_len, &data_iter, + data_direction); + if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); goto err; @@ -1115,7 +1499,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() */ 
cmd->tvc_vq_desc = vc.head; - vhost_scsi_target_queue_cmd(cmd); + vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr, + data_direction, + exp_data_len + prot_bytes); ret = 0; err: /* @@ -1123,11 +1509,18 @@ err: * EINVAL: Invalid response buffer, drop the request * EIO: Respond with bad target * EAGAIN: Pending request + * ENOMEM: Could not allocate resources for request */ if (ret == -ENXIO) break; - else if (ret == -EIO) - vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); + else if (ret == -EIO) { + vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD); + vhost_scsi_log_write(vq, vq_log, log_num); + } else if (ret == -ENOMEM) { + vhost_scsi_send_status(vs, vq, &vc, + SAM_STAT_TASK_SET_FULL); + vhost_scsi_log_write(vq, vq_log, log_num); + } } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); out: mutex_unlock(&vq->mutex); @@ -1166,16 +1559,36 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work) else resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED; + mutex_lock(&tmf->svq->vq.mutex); vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs, tmf->vq_desc, &tmf->resp_iov, resp_code); + vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log, + tmf->tmf_log_num); + mutex_unlock(&tmf->svq->vq.mutex); + vhost_scsi_release_tmf_res(tmf); } +static void vhost_scsi_tmf_flush_work(struct work_struct *work) +{ + struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf, + flush_work); + struct vhost_virtqueue *vq = &tmf->svq->vq; + /* + * Make sure we have sent responses for other commands before we + * send our response. + */ + vhost_dev_flush(vq->dev); + if (!vhost_vq_work_queue(vq, &tmf->vwork)) + vhost_scsi_release_tmf_res(tmf); +} + static void vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, struct vhost_virtqueue *vq, struct virtio_scsi_ctrl_tmf_req *vtmf, - struct vhost_scsi_ctx *vc) + struct vhost_scsi_ctx *vc, + struct vhost_log *log, unsigned int log_num) { struct vhost_scsi_virtqueue *svq = container_of(vq, struct vhost_scsi_virtqueue, vq); @@ -1190,19 +1603,12 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, goto send_reject; } - mutex_lock(&tpg->tv_tpg_mutex); - if (list_empty(&tpg->tmf_queue)) { - pr_err("Missing reserve TMF. 
Could not handle LUN RESET.\n"); - mutex_unlock(&tpg->tv_tpg_mutex); + tmf = kzalloc(sizeof(*tmf), GFP_KERNEL); + if (!tmf) goto send_reject; - } - tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf, - queue_entry); - list_del_init(&tmf->queue_entry); - mutex_unlock(&tpg->tv_tpg_mutex); - - tmf->tpg = tpg; + INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work); + vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work); tmf->vhost = vs; tmf->svq = svq; tmf->resp_iov = vq->iov[vc->out]; @@ -1210,6 +1616,19 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, tmf->in_iovs = vc->in; tmf->inflight = vhost_scsi_get_inflight(vq); + if (unlikely(log && log_num)) { + tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log), + GFP_KERNEL); + if (tmf->tmf_log) { + memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num); + tmf->tmf_log_num = log_num; + } else { + pr_err("vhost_scsi tmf log allocation error\n"); + vhost_scsi_release_tmf_res(tmf); + goto send_reject; + } + } + if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL, vhost_buf_to_lun(vtmf->lun), NULL, TMR_LUN_RESET, GFP_KERNEL, 0, @@ -1223,6 +1642,7 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, send_reject: vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out], VIRTIO_SCSI_S_FUNCTION_REJECTED); + vhost_scsi_log_write(vq, log, log_num); } static void @@ -1259,6 +1679,8 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) struct vhost_scsi_ctx vc; size_t typ_size; int ret, c = 0; + struct vhost_log *vq_log; + unsigned int log_num; mutex_lock(&vq->mutex); /* @@ -1272,8 +1694,11 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vhost_disable_notify(&vs->dev, vq); + vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? + vq->log : NULL; + do { - ret = vhost_scsi_get_desc(vs, vq, &vc); + ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num); if (ret) goto err; @@ -1337,9 +1762,12 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) goto err; if (v_req.type == VIRTIO_SCSI_T_TMF) - vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc); - else + vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc, + vq_log, log_num); + else { vhost_scsi_send_an_resp(vs, vq, &vc); + vhost_scsi_log_write(vq, vq_log, log_num); + } err: /* * ENXIO: No more requests, or read error, wait for next kick @@ -1349,8 +1777,13 @@ err: */ if (ret == -ENXIO) break; - else if (ret == -EIO) - vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); + else if (ret == -EIO) { + vhost_scsi_send_bad_target(vs, vq, &vc, + v_req.type == VIRTIO_SCSI_T_TMF ? 
+ TYPE_CTRL_TMF : + TYPE_CTRL_AN); + vhost_scsi_log_write(vq, vq_log, log_num); + } } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); out: mutex_unlock(&vq->mutex); @@ -1367,11 +1800,9 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) } static void -vhost_scsi_send_evt(struct vhost_scsi *vs, - struct vhost_scsi_tpg *tpg, - struct se_lun *lun, - u32 event, - u32 reason) +vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq, + struct vhost_scsi_tpg *tpg, struct se_lun *lun, + u32 event, u32 reason) { struct vhost_scsi_evt *evt; @@ -1393,7 +1824,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, } llist_add(&evt->list, &vs->vs_event_list); - vhost_work_queue(&vs->dev, &vs->vs_event_work); + if (!vhost_vq_work_queue(vq, &vs->vs_event_work)) + vhost_scsi_complete_events(vs, true); } static void vhost_scsi_evt_handle_kick(struct vhost_work *work) @@ -1407,7 +1839,8 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work) goto out; if (vs->vs_events_missed) - vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); + vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, + 0); out: mutex_unlock(&vq->mutex); } @@ -1445,6 +1878,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) wait_for_completion(&vs->old_inflight[i]->comp); } +static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq) +{ + struct vhost_scsi_virtqueue *svq = container_of(vq, + struct vhost_scsi_virtqueue, vq); + struct vhost_scsi_cmd *tv_cmd; + unsigned int i; + + if (!svq->scsi_cmds) + return; + + for (i = 0; i < svq->max_cmds; i++) { + tv_cmd = &svq->scsi_cmds[i]; + kfree(tv_cmd->tvc_log); + tv_cmd->tvc_log = NULL; + tv_cmd->tvc_log_num = 0; + } +} + static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq) { struct vhost_scsi_virtqueue *svq = container_of(vq, @@ -1458,12 +1909,13 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq) for (i = 0; i < svq->max_cmds; i++) { tv_cmd = &svq->scsi_cmds[i]; - kfree(tv_cmd->tvc_sgl); - kfree(tv_cmd->tvc_prot_sgl); - kfree(tv_cmd->tvc_upages); + kfree(tv_cmd->sgl); + kfree(tv_cmd->prot_sgl); } sbitmap_free(&svq->scsi_tags); + kfree(svq->upages); + vhost_scsi_destroy_vq_log(vq); kfree(svq->scsi_cmds); svq->scsi_cmds = NULL; } @@ -1472,6 +1924,7 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds) { struct vhost_scsi_virtqueue *svq = container_of(vq, struct vhost_scsi_virtqueue, vq); + struct vhost_scsi *vs = svq->vs; struct vhost_scsi_cmd *tv_cmd; unsigned int i; @@ -1489,31 +1942,33 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds) return -ENOMEM; } + svq->upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, sizeof(struct page *), + GFP_KERNEL); + if (!svq->upages) + goto out; + for (i = 0; i < max_cmds; i++) { tv_cmd = &svq->scsi_cmds[i]; - tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS, - sizeof(struct scatterlist), - GFP_KERNEL); - if (!tv_cmd->tvc_sgl) { - pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); - goto out; - } - - tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, - sizeof(struct page *), - GFP_KERNEL); - if (!tv_cmd->tvc_upages) { - pr_err("Unable to allocate tv_cmd->tvc_upages\n"); - goto out; + if (vs->inline_sg_cnt) { + tv_cmd->sgl = kcalloc(vs->inline_sg_cnt, + sizeof(struct scatterlist), + GFP_KERNEL); + if (!tv_cmd->sgl) { + pr_err("Unable to allocate tv_cmd->sgl\n"); + goto out; + } } - tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS, - sizeof(struct scatterlist), - GFP_KERNEL); - if 
(!tv_cmd->tvc_prot_sgl) { - pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); - goto out; + if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) && + vs->inline_sg_cnt) { + tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt, + sizeof(struct scatterlist), + GFP_KERNEL); + if (!tv_cmd->prot_sgl) { + pr_err("Unable to allocate tv_cmd->prot_sgl\n"); + goto out; + } } } return 0; @@ -1527,7 +1982,7 @@ out: * vhost_scsi_tpg with an active struct vhost_scsi_nexus * * The lock nesting rule is: - * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex + * vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex */ static int vhost_scsi_set_endpoint(struct vhost_scsi *vs, @@ -1541,7 +1996,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, int index, ret, i, len; bool match = false; - mutex_lock(&vhost_scsi_mutex); mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. */ @@ -1553,15 +2007,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } } + if (vs->vs_tpg) { + pr_err("vhost-scsi endpoint already set for %s.\n", + vs->vs_vhost_wwpn); + ret = -EEXIST; + goto out; + } + len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; vs_tpg = kzalloc(len, GFP_KERNEL); if (!vs_tpg) { ret = -ENOMEM; goto out; } - if (vs->vs_tpg) - memcpy(vs_tpg, vs->vs_tpg, len); + mutex_lock(&vhost_scsi_mutex); list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { mutex_lock(&tpg->tv_tpg_mutex); if (!tpg->tpg_nexus) { @@ -1575,11 +2035,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, tv_tport = tpg->tport; if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { - if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { - mutex_unlock(&tpg->tv_tpg_mutex); - ret = -EEXIST; - goto undepend; - } /* * In order to ensure individual vhost-scsi configfs * groups cannot be removed while in use by vhost ioctl, @@ -1591,6 +2046,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, if (ret) { pr_warn("target_depend_item() failed: %d\n", ret); mutex_unlock(&tpg->tv_tpg_mutex); + mutex_unlock(&vhost_scsi_mutex); goto undepend; } tpg->tv_tpg_vhost_count++; @@ -1600,6 +2056,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } mutex_unlock(&tpg->tv_tpg_mutex); } + mutex_unlock(&vhost_scsi_mutex); if (match) { memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, @@ -1624,15 +2081,15 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } ret = 0; } else { - ret = -EEXIST; + ret = -ENODEV; + goto free_tpg; } /* - * Act as synchronize_rcu to make sure access to - * old vs->vs_tpg is finished. + * Act as synchronize_rcu to make sure requests after this point + * see a fully setup device. */ vhost_scsi_flush(vs); - kfree(vs->vs_tpg); vs->vs_tpg = vs_tpg; goto out; @@ -1645,14 +2102,17 @@ undepend: for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { tpg = vs_tpg[i]; if (tpg) { + mutex_lock(&tpg->tv_tpg_mutex); + tpg->vhost_scsi = NULL; tpg->tv_tpg_vhost_count--; + mutex_unlock(&tpg->tv_tpg_mutex); target_undepend_item(&tpg->se_tpg.tpg_group.cg_item); } } +free_tpg: kfree(vs_tpg); out: mutex_unlock(&vs->dev.mutex); - mutex_unlock(&vhost_scsi_mutex); return ret; } @@ -1668,7 +2128,6 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, int index, ret, i; u8 target; - mutex_lock(&vhost_scsi_mutex); mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. 
*/ for (index = 0; index < vs->dev.nvqs; ++index) { @@ -1689,11 +2148,10 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, if (!tpg) continue; - mutex_lock(&tpg->tv_tpg_mutex); tv_tport = tpg->tport; if (!tv_tport) { ret = -ENODEV; - goto err_tpg; + goto err_dev; } if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { @@ -1702,35 +2160,51 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, tv_tport->tport_name, tpg->tport_tpgt, t->vhost_wwpn, t->vhost_tpgt); ret = -EINVAL; - goto err_tpg; + goto err_dev; } + match = true; + } + if (!match) + goto free_vs_tpg; + + /* Prevent new cmds from starting and accessing the tpgs/sessions */ + for (i = 0; i < vs->dev.nvqs; i++) { + vq = &vs->vqs[i].vq; + mutex_lock(&vq->mutex); + vhost_vq_set_backend(vq, NULL); + mutex_unlock(&vq->mutex); + } + /* Make sure cmds are not running before tearing them down. */ + vhost_scsi_flush(vs); + + for (i = 0; i < vs->dev.nvqs; i++) { + vq = &vs->vqs[i].vq; + vhost_scsi_destroy_vq_cmds(vq); + } + + /* + * We can now release our hold on the tpg and sessions and userspace + * can free them after this point. + */ + for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { + target = i; + tpg = vs->vs_tpg[target]; + if (!tpg) + continue; + + mutex_lock(&tpg->tv_tpg_mutex); + tpg->tv_tpg_vhost_count--; tpg->vhost_scsi = NULL; vs->vs_tpg[target] = NULL; - match = true; + mutex_unlock(&tpg->tv_tpg_mutex); - /* - * Release se_tpg->tpg_group.cg_item configfs dependency now - * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. - */ + se_tpg = &tpg->se_tpg; target_undepend_item(&se_tpg->tpg_group.cg_item); } - if (match) { - for (i = 0; i < vs->dev.nvqs; i++) { - vq = &vs->vqs[i].vq; - mutex_lock(&vq->mutex); - vhost_vq_set_backend(vq, NULL); - mutex_unlock(&vq->mutex); - } - /* Make sure cmds are not running before tearing them down. */ - vhost_scsi_flush(vs); - for (i = 0; i < vs->dev.nvqs; i++) { - vq = &vs->vqs[i].vq; - vhost_scsi_destroy_vq_cmds(vq); - } - } +free_vs_tpg: /* * Act as synchronize_rcu to make sure access to * old vs->vs_tpg is finished. @@ -1738,22 +2212,20 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vhost_scsi_flush(vs); kfree(vs->vs_tpg); vs->vs_tpg = NULL; + memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn)); WARN_ON(vs->vs_events_nr); mutex_unlock(&vs->dev.mutex); - mutex_unlock(&vhost_scsi_mutex); return 0; -err_tpg: - mutex_unlock(&tpg->tv_tpg_mutex); err_dev: mutex_unlock(&vs->dev.mutex); - mutex_unlock(&vhost_scsi_mutex); return ret; } static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) { struct vhost_virtqueue *vq; + bool is_log, was_log; int i; if (features & ~VHOST_SCSI_FEATURES) @@ -1766,18 +2238,46 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) return -EFAULT; } + if (!vs->dev.nvqs) + goto out; + + is_log = features & (1 << VHOST_F_LOG_ALL); + /* + * All VQs should have same feature. + */ + was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL); + for (i = 0; i < vs->dev.nvqs; i++) { vq = &vs->vqs[i].vq; mutex_lock(&vq->mutex); vq->acked_features = features; mutex_unlock(&vq->mutex); } + + /* + * If VHOST_F_LOG_ALL is removed, free tvc_log after + * vq->acked_features is committed. 
+ */ + if (!is_log && was_log) { + for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) { + if (!vs->vqs[i].scsi_cmds) + continue; + + vq = &vs->vqs[i].vq; + mutex_lock(&vq->mutex); + vhost_scsi_destroy_vq_log(vq); + mutex_unlock(&vq->mutex); + } + } + +out: mutex_unlock(&vs->dev.mutex); return 0; } static int vhost_scsi_open(struct inode *inode, struct file *f) { + struct vhost_scsi_virtqueue *svq; struct vhost_scsi *vs; struct vhost_virtqueue **vqs; int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs; @@ -1785,6 +2285,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) vs = kvzalloc(sizeof(*vs), GFP_KERNEL); if (!vs) goto err_vs; + vs->inline_sg_cnt = vhost_scsi_inline_sg_cnt; if (nvqs > VHOST_SCSI_MAX_IO_VQ) { pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs, @@ -1796,10 +2297,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) } nvqs += VHOST_SCSI_VQ_IO; - vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL); - if (!vs->compl_bitmap) - goto err_compl_bitmap; - vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight), GFP_KERNEL | __GFP_ZERO); if (!vs->old_inflight) @@ -1814,7 +2311,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) if (!vqs) goto err_local_vqs; - vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); vs->vs_events_nr = 0; @@ -1825,8 +2321,14 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) { - vqs[i] = &vs->vqs[i].vq; - vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; + svq = &vs->vqs[i]; + + vqs[i] = &svq->vq; + svq->vs = vs; + init_llist_head(&svq->completion_list); + vhost_work_init(&svq->completion_work, + vhost_scsi_complete_cmd_work); + svq->vq.handle_kick = vhost_scsi_handle_kick; } vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV, VHOST_SCSI_WEIGHT, 0, true, NULL); @@ -1841,8 +2343,6 @@ err_local_vqs: err_vqs: kfree(vs->old_inflight); err_inflight: - bitmap_free(vs->compl_bitmap); -err_compl_bitmap: kvfree(vs); err_vs: return r; @@ -1862,7 +2362,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f) kfree(vs->dev.vqs); kfree(vs->vqs); kfree(vs->old_inflight); - bitmap_free(vs->compl_bitmap); kvfree(vs); return 0; } @@ -1924,6 +2423,14 @@ vhost_scsi_ioctl(struct file *f, if (copy_from_user(&features, featurep, sizeof features)) return -EFAULT; return vhost_scsi_set_features(vs, features); + case VHOST_NEW_WORKER: + case VHOST_FREE_WORKER: + case VHOST_ATTACH_VRING_WORKER: + case VHOST_GET_VRING_WORKER: + mutex_lock(&vs->dev.mutex); + r = vhost_worker_ioctl(&vs->dev, ioctl, argp); + mutex_unlock(&vs->dev.mutex); + return r; default: mutex_lock(&vs->dev.mutex); r = vhost_dev_ioctl(&vs->dev, ioctl, argp); @@ -1988,8 +2495,6 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg, if (!vs) return; - mutex_lock(&vs->dev.mutex); - if (plug) reason = VIRTIO_SCSI_EVT_RESET_RESCAN; else @@ -1997,11 +2502,18 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg, vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; mutex_lock(&vq->mutex); + /* + * We can't queue events if the backend has been cleared, because + * we could end up queueing an event after the flush. 
+ */ + if (!vhost_vq_get_backend(vq)) + goto unlock; + if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) - vhost_scsi_send_evt(vs, tpg, lun, + vhost_scsi_send_evt(vs, vq, tpg, lun, VIRTIO_SCSI_T_TRANSPORT_RESET, reason); +unlock: mutex_unlock(&vq->mutex); - mutex_unlock(&vs->dev.mutex); } static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) @@ -2019,24 +2531,11 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg, { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); - struct vhost_scsi_tmf *tmf; - - tmf = kzalloc(sizeof(*tmf), GFP_KERNEL); - if (!tmf) - return -ENOMEM; - INIT_LIST_HEAD(&tmf->queue_entry); - vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work); - - mutex_lock(&vhost_scsi_mutex); mutex_lock(&tpg->tv_tpg_mutex); tpg->tv_tpg_port_count++; - list_add_tail(&tmf->queue_entry, &tpg->tmf_queue); - mutex_unlock(&tpg->tv_tpg_mutex); - vhost_scsi_hotplug(tpg, lun); - - mutex_unlock(&vhost_scsi_mutex); + mutex_unlock(&tpg->tv_tpg_mutex); return 0; } @@ -2046,21 +2545,11 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg, { struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); - struct vhost_scsi_tmf *tmf; - - mutex_lock(&vhost_scsi_mutex); mutex_lock(&tpg->tv_tpg_mutex); tpg->tv_tpg_port_count--; - tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf, - queue_entry); - list_del(&tmf->queue_entry); - kfree(tmf); - mutex_unlock(&tpg->tv_tpg_mutex); - vhost_scsi_hotunplug(tpg, lun); - - mutex_unlock(&vhost_scsi_mutex); + mutex_unlock(&tpg->tv_tpg_mutex); } static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store( @@ -2092,7 +2581,7 @@ static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show( struct vhost_scsi_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg); - return sprintf(page, "%d\n", tpg->tv_fabric_prot_type); + return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type); } CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type); @@ -2121,7 +2610,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, return -ENOMEM; } /* - * Since we are running in 'demo mode' this call with generate a + * Since we are running in 'demo mode' this call will generate a * struct se_node_acl for the vhost_scsi struct se_portal_group with * the SCSI Initiator port name of the passed configfs group 'name'. 
*/ @@ -2202,7 +2691,7 @@ static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page) mutex_unlock(&tpg->tv_tpg_mutex); return -ENODEV; } - ret = snprintf(page, PAGE_SIZE, "%s\n", + ret = sysfs_emit(page, "%s\n", tv_nexus->tvn_se_sess->se_node_acl->initiatorname); mutex_unlock(&tpg->tv_tpg_mutex); @@ -2316,7 +2805,6 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name) } mutex_init(&tpg->tv_tpg_mutex); INIT_LIST_HEAD(&tpg->tv_tpg_list); - INIT_LIST_HEAD(&tpg->tmf_queue); tpg->tport = tport; tpg->tport_tpgt = tpgt; @@ -2399,7 +2887,7 @@ vhost_scsi_make_tport(struct target_fabric_configfs *tf, check_len: if (strlen(name) >= VHOST_SCSI_NAMELEN) { pr_err("Emulated %s Address: %s, exceeds" - " max: %d\n", name, vhost_scsi_dump_proto_id(tport), + " max: %d\n", vhost_scsi_dump_proto_id(tport), name, VHOST_SCSI_NAMELEN); kfree(tport); return ERR_PTR(-EINVAL); @@ -2427,8 +2915,8 @@ static void vhost_scsi_drop_tport(struct se_wwn *wwn) static ssize_t vhost_scsi_wwn_version_show(struct config_item *item, char *page) { - return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" - "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, + return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s" + " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, utsname()->machine); } @@ -2447,17 +2935,11 @@ static const struct target_core_fabric_ops vhost_scsi_ops = { .tpg_get_tag = vhost_scsi_get_tpgt, .tpg_check_demo_mode = vhost_scsi_check_true, .tpg_check_demo_mode_cache = vhost_scsi_check_true, - .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, - .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only, - .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, .release_cmd = vhost_scsi_release_cmd, .check_stop_free = vhost_scsi_check_stop_free, - .sess_get_index = vhost_scsi_sess_get_index, .sess_get_initiator_sid = NULL, .write_pending = vhost_scsi_write_pending, - .set_default_node_attributes = vhost_scsi_set_default_node_attrs, - .get_cmd_state = vhost_scsi_get_cmd_state, .queue_data_in = vhost_scsi_queue_data_in, .queue_status = vhost_scsi_queue_status, .queue_tm_rsp = vhost_scsi_queue_tm_rsp, @@ -2475,6 +2957,9 @@ static const struct target_core_fabric_ops vhost_scsi_ops = { .tfc_wwn_attrs = vhost_scsi_wwn_attrs, .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs, + + .default_submit_type = TARGET_QUEUE_SUBMIT, + .direct_submit_supp = 1, }; static int __init vhost_scsi_init(void) @@ -2499,13 +2984,13 @@ out_vhost_scsi_deregister: vhost_scsi_deregister(); out: return ret; -}; +} static void vhost_scsi_exit(void) { target_unregister_template(&vhost_scsi_ops); vhost_scsi_deregister(); -}; +} MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); MODULE_ALIAS("tcm_vhost"); |
