diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-07-03 15:38:26 -0700 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-07-03 15:38:26 -0700 | 
| commit | a8d70602b186f3c347e62c59a418be802b71886d (patch) | |
| tree | 48bf9b05703ff824a4dddfaaa773687c9fe6fd05 /drivers/vhost/scsi.c | |
| parent | e8069f5a8e3bdb5fdeeff895780529388592ee7a (diff) | |
| parent | 9e396a2f434f829fb3b98a24bb8db5429320589d (diff) | |
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
 - resume support in vdpa/solidrun
 - structure size optimizations in virtio_pci
 - new pds_vdpa driver
 - immediate initialization mechanism for vdpa/ifcvf
 - interrupt bypass for vdpa/mlx5
 - multiple worker support for vhost
 - virtio net in Intel F2000X-PL support for vdpa/ifcvf
 - fixes, cleanups all over the place
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (48 commits)
  vhost: Make parameter name match of vhost_get_vq_desc()
  vduse: fix NULL pointer dereference
  vhost: Allow worker switching while work is queueing
  vhost_scsi: add support for worker ioctls
  vhost: allow userspace to create workers
  vhost: replace single worker pointer with xarray
  vhost: add helper to parse userspace vring state/file
  vhost: remove vhost_work_queue
  vhost_scsi: flush IO vqs then send TMF rsp
  vhost_scsi: convert to vhost_vq_work_queue
  vhost_scsi: make SCSI cmd completion per vq
  vhost_sock: convert to vhost_vq_work_queue
  vhost: convert poll work to be vq based
  vhost: take worker or vq for flushing
  vhost: take worker or vq instead of dev for queueing
  vhost, vhost_net: add helper to check if vq has work
  vhost: add vhost_worker pointer to vhost_virtqueue
  vhost: dynamically allocate vhost_worker
  vhost: create worker at end of vhost_dev_set_owner
  virtio_bt: call scheduler when we free unused buffs
  ...
Diffstat (limited to 'drivers/vhost/scsi.c')
| -rw-r--r-- | drivers/vhost/scsi.c | 103 | 
1 file changed, 61 insertions, 42 deletions
| diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index bb10fa4bb4f6..c83f7f043470 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -167,6 +167,7 @@ MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi d  struct vhost_scsi_virtqueue {  	struct vhost_virtqueue vq; +	struct vhost_scsi *vs;  	/*  	 * Reference counting for inflight reqs, used for flush operation. At  	 * each time, one reference tracks new commands submitted, while we @@ -181,6 +182,9 @@ struct vhost_scsi_virtqueue {  	struct vhost_scsi_cmd *scsi_cmds;  	struct sbitmap scsi_tags;  	int max_cmds; + +	struct vhost_work completion_work; +	struct llist_head completion_list;  };  struct vhost_scsi { @@ -190,12 +194,8 @@ struct vhost_scsi {  	struct vhost_dev dev;  	struct vhost_scsi_virtqueue *vqs; -	unsigned long *compl_bitmap;  	struct vhost_scsi_inflight **old_inflight; -	struct vhost_work vs_completion_work; /* cmd completion work item */ -	struct llist_head vs_completion_list; /* cmd completion queue */ -  	struct vhost_work vs_event_work; /* evt injection work item */  	struct llist_head vs_event_list; /* evt injection queue */ @@ -353,15 +353,17 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)  	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {  		struct vhost_scsi_tmf *tmf = container_of(se_cmd,  					struct vhost_scsi_tmf, se_cmd); +		struct vhost_virtqueue *vq = &tmf->svq->vq; -		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork); +		vhost_vq_work_queue(vq, &tmf->vwork);  	} else {  		struct vhost_scsi_cmd *cmd = container_of(se_cmd,  					struct vhost_scsi_cmd, tvc_se_cmd); -		struct vhost_scsi *vs = cmd->tvc_vhost; +		struct vhost_scsi_virtqueue *svq =  container_of(cmd->tvc_vq, +					struct vhost_scsi_virtqueue, vq); -		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); -		vhost_work_queue(&vs->dev, &vs->vs_completion_work); +		llist_add(&cmd->tvc_completion_list, &svq->completion_list); +		vhost_vq_work_queue(&svq->vq, 
&svq->completion_work);  	}  } @@ -509,17 +511,17 @@ static void vhost_scsi_evt_work(struct vhost_work *work)   */  static void vhost_scsi_complete_cmd_work(struct vhost_work *work)  { -	struct vhost_scsi *vs = container_of(work, struct vhost_scsi, -					vs_completion_work); +	struct vhost_scsi_virtqueue *svq = container_of(work, +				struct vhost_scsi_virtqueue, completion_work);  	struct virtio_scsi_cmd_resp v_rsp;  	struct vhost_scsi_cmd *cmd, *t;  	struct llist_node *llnode;  	struct se_cmd *se_cmd;  	struct iov_iter iov_iter; -	int ret, vq; +	bool signal = false; +	int ret; -	bitmap_zero(vs->compl_bitmap, vs->dev.nvqs); -	llnode = llist_del_all(&vs->vs_completion_list); +	llnode = llist_del_all(&svq->completion_list);  	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {  		se_cmd = &cmd->tvc_se_cmd; @@ -539,21 +541,17 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)  			      cmd->tvc_in_iovs, sizeof(v_rsp));  		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);  		if (likely(ret == sizeof(v_rsp))) { -			struct vhost_scsi_virtqueue *q; +			signal = true; +  			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); -			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); -			vq = q - vs->vqs; -			__set_bit(vq, vs->compl_bitmap);  		} else  			pr_err("Faulted on virtio_scsi_cmd_resp\n");  		vhost_scsi_release_cmd_res(se_cmd);  	} -	vq = -1; -	while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1)) -		< vs->dev.nvqs) -		vhost_signal(&vs->dev, &vs->vqs[vq].vq); +	if (signal) +		vhost_signal(&svq->vs->dev, &svq->vq);  }  static struct vhost_scsi_cmd * @@ -1135,12 +1133,27 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)  {  	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,  						  vwork); -	int resp_code; +	struct vhost_virtqueue *ctl_vq, *vq; +	int resp_code, i; + +	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) { +		/* +		 * Flush IO vqs that don't share a worker with the 
ctl to make +		 * sure they have sent their responses before us. +		 */ +		ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq; +		for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) { +			vq = &tmf->vhost->vqs[i].vq; + +			if (vhost_vq_is_setup(vq) && +			    vq->worker != ctl_vq->worker) +				vhost_vq_flush(vq); +		} -	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)  		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; -	else +	} else {  		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED; +	}  	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,  				 tmf->vq_desc, &tmf->resp_iov, resp_code); @@ -1335,11 +1348,9 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)  }  static void -vhost_scsi_send_evt(struct vhost_scsi *vs, -		   struct vhost_scsi_tpg *tpg, -		   struct se_lun *lun, -		   u32 event, -		   u32 reason) +vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq, +		    struct vhost_scsi_tpg *tpg, struct se_lun *lun, +		    u32 event, u32 reason)  {  	struct vhost_scsi_evt *evt; @@ -1361,7 +1372,7 @@ vhost_scsi_send_evt(struct vhost_scsi *vs,  	}  	llist_add(&evt->list, &vs->vs_event_list); -	vhost_work_queue(&vs->dev, &vs->vs_event_work); +	vhost_vq_work_queue(vq, &vs->vs_event_work);  }  static void vhost_scsi_evt_handle_kick(struct vhost_work *work) @@ -1375,7 +1386,8 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)  		goto out;  	if (vs->vs_events_missed) -		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); +		vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, +				    0);  out:  	mutex_unlock(&vq->mutex);  } @@ -1770,6 +1782,7 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)  static int vhost_scsi_open(struct inode *inode, struct file *f)  { +	struct vhost_scsi_virtqueue *svq;  	struct vhost_scsi *vs;  	struct vhost_virtqueue **vqs;  	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs; @@ -1788,10 +1801,6 @@ static int vhost_scsi_open(struct inode 
*inode, struct file *f)  	}  	nvqs += VHOST_SCSI_VQ_IO; -	vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL); -	if (!vs->compl_bitmap) -		goto err_compl_bitmap; -  	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),  					 GFP_KERNEL | __GFP_ZERO);  	if (!vs->old_inflight) @@ -1806,7 +1815,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)  	if (!vqs)  		goto err_local_vqs; -	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);  	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);  	vs->vs_events_nr = 0; @@ -1817,8 +1825,14 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)  	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;  	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;  	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) { -		vqs[i] = &vs->vqs[i].vq; -		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; +		svq = &vs->vqs[i]; + +		vqs[i] = &svq->vq; +		svq->vs = vs; +		init_llist_head(&svq->completion_list); +		vhost_work_init(&svq->completion_work, +				vhost_scsi_complete_cmd_work); +		svq->vq.handle_kick = vhost_scsi_handle_kick;  	}  	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,  		       VHOST_SCSI_WEIGHT, 0, true, NULL); @@ -1833,8 +1847,6 @@ err_local_vqs:  err_vqs:  	kfree(vs->old_inflight);  err_inflight: -	bitmap_free(vs->compl_bitmap); -err_compl_bitmap:  	kvfree(vs);  err_vs:  	return r; @@ -1854,7 +1866,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)  	kfree(vs->dev.vqs);  	kfree(vs->vqs);  	kfree(vs->old_inflight); -	bitmap_free(vs->compl_bitmap);  	kvfree(vs);  	return 0;  } @@ -1916,6 +1927,14 @@ vhost_scsi_ioctl(struct file *f,  		if (copy_from_user(&features, featurep, sizeof features))  			return -EFAULT;  		return vhost_scsi_set_features(vs, features); +	case VHOST_NEW_WORKER: +	case VHOST_FREE_WORKER: +	case VHOST_ATTACH_VRING_WORKER: +	case VHOST_GET_VRING_WORKER: +		mutex_lock(&vs->dev.mutex); +		r = 
vhost_worker_ioctl(&vs->dev, ioctl, argp); +		mutex_unlock(&vs->dev.mutex); +		return r;  	default:  		mutex_lock(&vs->dev.mutex);  		r = vhost_dev_ioctl(&vs->dev, ioctl, argp); @@ -1995,7 +2014,7 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,  		goto unlock;  	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) -		vhost_scsi_send_evt(vs, tpg, lun, +		vhost_scsi_send_evt(vs, vq, tpg, lun,  				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);  unlock:  	mutex_unlock(&vq->mutex); | 
