From c011bb669ddc22b0374d747d90467d1b2f80bc05 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:22:52 -0500
Subject: vhost: dynamically allocate vhost_worker

This patchset allows us to allocate multiple workers, so this has us
move from the vhost_worker that's embedded in the vhost_dev to
dynamically allocating it.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-3-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index fc900be504b3..cb872cc4157a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -44,7 +44,7 @@ struct vhost_poll {
 };

 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
-void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
+bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 bool vhost_has_work(struct vhost_dev *dev);

 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                      __poll_t mask, struct vhost_dev *dev);
@@ -158,7 +158,7 @@ struct vhost_dev {
        struct vhost_virtqueue **vqs;
        int nvqs;
        struct eventfd_ctx *log_ctx;
-       struct vhost_worker worker;
+       struct vhost_worker *worker;
        struct vhost_iotlb *umem;
        struct vhost_iotlb *iotlb;
        spinlock_t iotlb_lock;
--
cgit

From 737bdb643c4f488defd6c226eb40de2c8f6e3f75 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:22:53 -0500
Subject: vhost: add vhost_worker pointer to vhost_virtqueue

This patchset allows userspace to map vqs to different workers. This
patch adds a worker pointer to the vq so in later patches in this set
we can queue/flush specific vqs and their workers.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-4-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index cb872cc4157a..206617edb2a9 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -74,6 +74,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
        struct vhost_dev *dev;
+       struct vhost_worker *worker;

        /* The actual ring of buffers. */
        struct mutex mutex;
--
cgit
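Taken together, the two patches above turn the worker into a dynamically
allocated object that each vq can point at. As a rough sketch of the
allocation side -- illustrative only, simplified from the vhost.c changes
in this series, with error unwinding and attachment bookkeeping trimmed --
the create path looks something like:

    static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
    {
            struct vhost_worker *worker;
            char name[TASK_COMM_LEN];

            /* The worker used to be embedded in vhost_dev; now it is
             * allocated on demand. */
            worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
            if (!worker)
                    return NULL;

            snprintf(name, sizeof(name), "vhost-%d", current->pid);
            /* vhost_worker() is the thread function that drains work_list. */
            worker->vtsk = vhost_task_create(vhost_worker, worker, name);
            if (!worker->vtsk) {
                    kfree(worker);
                    return NULL;
            }

            dev->worker = worker;   /* a pointer now, not an embedded struct */
            vhost_task_start(worker->vtsk);
            return worker;
    }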
From 9784df151a601fa1d6d146f8a7f35a2d875e2976 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:22:54 -0500
Subject: vhost, vhost_net: add helper to check if vq has work

In the next patches each vq might have different workers, so one could
have work but others do not. For net, we only want to check specific
vqs, so this adds a helper to check if a vq has work pending and
converts vhost-net to use it.

Signed-off-by: Mike Christie
Acked-by: Jason Wang
Message-Id: <20230626232307.97930-5-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 206617edb2a9..37c183b37c42 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -45,7 +45,6 @@ struct vhost_poll {

 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
 bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
-bool vhost_has_work(struct vhost_dev *dev);

 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                      __poll_t mask, struct vhost_dev *dev);
@@ -199,6 +198,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
                       struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

+bool vhost_vq_has_work(struct vhost_virtqueue *vq);
 bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
 int vhost_vq_init_access(struct vhost_virtqueue *);
 int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
--
cgit

From 0921dddcb5898030f0951816ed685a958acfbde2 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:22:55 -0500
Subject: vhost: take worker or vq instead of dev for queueing

This patch has the core work queueing function take a worker for when
we support multiple workers. It also adds a helper that takes a vq
during queueing so modules can control which vq/worker to queue work
on.

This temporarily leaves vhost_work_queue in place; it will be removed
when the drivers are converted in the next patches.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-6-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 37c183b37c42..6a1ae8ae9c7d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -198,6 +198,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
                       struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
 bool vhost_vq_has_work(struct vhost_virtqueue *vq);
 bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
 int vhost_vq_init_access(struct vhost_virtqueue *);
--
cgit
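With this patch the vq, not the device, becomes the unit work is queued
against. A simplified sketch of how vhost_vq_work_queue() can sit on top of
a per-worker queueing primitive (illustrative; vhost_worker_queue() and the
exact wake-up call are assumptions about the internals, not verbatim kernel
code):

    static bool vhost_worker_queue(struct vhost_worker *worker,
                                   struct vhost_work *work)
    {
            if (!worker)
                    return false;
            if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                    /* llist_add() is lockless; the worker thread reverses
                     * the list and invokes each work's fn. */
                    llist_add(&work->node, &worker->work_list);
                    vhost_task_wake(worker->vtsk);
            }
            return true;
    }

    bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
    {
            return vhost_worker_queue(vq->worker, work);
    }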
From a6fc04739be7cd8a744658fd2734906a6a0eb400 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:22:56 -0500
Subject: vhost: take worker or vq for flushing

This patch has the core work flush function take a worker. When we
support multiple workers we can then flush each worker during device
removal, stoppage, etc. It also adds a helper to flush specific
virtqueues, so vhost-scsi can flush IO vqs from its ctl vq.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-7-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 6a1ae8ae9c7d..ac1f4924e548 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -198,6 +198,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
                       struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

+void vhost_vq_flush(struct vhost_virtqueue *vq);
 bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
 bool vhost_vq_has_work(struct vhost_virtqueue *vq);
 bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
--
cgit

From 493b94bf5ae0f6d67bd3728e8c723800d99a13ad Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:22:57 -0500
Subject: vhost: convert poll work to be vq based

This has the drivers pass in their poll-to-vq mapping and then converts
the core poll code to use the vq based helpers. In the next patches we
will allow vqs to be handled by different workers, so to allow drivers
to execute operations like queue, stop, flush, etc. on specific
polls/vqs we need to know the mappings.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-8-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index ac1f4924e548..f44eecd4fcf9 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -41,13 +41,15 @@ struct vhost_poll {
        struct vhost_work work;
        __poll_t mask;
        struct vhost_dev *dev;
+       struct vhost_virtqueue *vq;
 };

 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
 bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);

 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
-                     __poll_t mask, struct vhost_dev *dev);
+                     __poll_t mask, struct vhost_dev *dev,
+                     struct vhost_virtqueue *vq);
 int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
--
cgit
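On the driver side this conversion is mechanical: each poll is initialized
with the vq it services so the core can queue the poll's work on that vq's
worker. For vhost-net's TX poll, for example, the call becomes roughly
(illustrative, modeled on the vhost-net conversion in this series):

    vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT,
                    dev, n->vqs[VHOST_NET_VQ_TX].vq);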
From 27eca189114235fde84980b8ee044f42c1d59519 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:23:02 -0500
Subject: vhost: remove vhost_work_queue

vhost_work_queue is no longer used. Each driver is using the poll or
vq based queueing, so remove vhost_work_queue.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-13-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f44eecd4fcf9..b850f534bc9a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -44,15 +44,14 @@ struct vhost_poll {
        struct vhost_virtqueue *vq;
 };

-void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
-bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
-
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                      __poll_t mask, struct vhost_dev *dev,
                      struct vhost_virtqueue *vq);
 int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
+
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
 void vhost_dev_flush(struct vhost_dev *dev);

 struct vhost_log {
--
cgit

From 1cdaafa1b8b4ef6052869c86ba2b41c0cff05957 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:23:04 -0500
Subject: vhost: replace single worker pointer with xarray

The next patch allows userspace to create multiple workers per device,
so this patch replaces the vhost_worker pointer with an xarray so we
can store multiple workers and look them up.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-15-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b850f534bc9a..31937e98c01b 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -30,6 +30,7 @@ struct vhost_worker {
        struct vhost_task *vtsk;
        struct llist_head work_list;
        u64 kcov_handle;
+       u32 id;
 };

 /* Poll a file (eventfd or socket) */
@@ -159,7 +160,6 @@ struct vhost_dev {
        struct vhost_virtqueue **vqs;
        int nvqs;
        struct eventfd_ctx *log_ctx;
-       struct vhost_worker *worker;
        struct vhost_iotlb *umem;
        struct vhost_iotlb *iotlb;
        spinlock_t iotlb_lock;
@@ -169,6 +169,7 @@ struct vhost_dev {
        int iov_limit;
        int weight;
        int byte_weight;
+       struct xarray worker_xa;
        bool use_worker;
        int (*msg_handler)(struct vhost_dev *dev, u32 asid,
                           struct vhost_iotlb_msg *msg);
--
cgit

From c1ecd8e9500797748ae4f79657971955d452d69d Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:23:05 -0500
Subject: vhost: allow userspace to create workers

For vhost-scsi with 3 vqs or more and a workload that tries to use
them in parallel like:

fio --filename=/dev/sdb --direct=1 --rw=randrw --bs=4k \
--ioengine=libaio --iodepth=128 --numjobs=3

the single vhost worker thread becomes a bottleneck and we are stuck
at around 500K IOPs no matter how many jobs, virtqueues, and CPUs are
used.

To better utilize virtqueues and available CPUs, this patch allows
userspace to create workers and bind them to vqs. You can have N
workers per dev and also share N workers with M vqs on that dev.

This patch adds the interface-related code and the next patch will
hook vhost-scsi into it. The patches do not try to hook net and vsock
into the interface because:

1. multiple workers don't seem to help vsock. The problem is that with
only 2 virtqueues we never fully use the existing worker when doing
bidirectional tests. This seems to match vhost-scsi where we don't see
the worker as a bottleneck until 3 virtqueues are used.

2. net already has a way to use multiple workers.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-16-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 31937e98c01b..4920ca63b8de 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -31,6 +31,7 @@ struct vhost_worker {
        struct llist_head work_list;
        u64 kcov_handle;
        u32 id;
+       int attachment_cnt;
 };

 /* Poll a file (eventfd or socket) */
@@ -190,6 +191,8 @@ void vhost_dev_cleanup(struct vhost_dev *);
 void vhost_dev_stop(struct vhost_dev *);
 long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
+                        void __user *argp);
 bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
 bool vhost_log_access_ok(struct vhost_dev *);
 void vhost_clear_msg(struct vhost_dev *dev);
--
cgit
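The new vhost_worker_ioctl() hands out worker ids that the kernel allocates
from dev->worker_xa (e.g. with xa_alloc()). From userspace, creating a
worker and binding a vq to it with the uapi this series adds looks roughly
like the sketch below; the ioctl names and struct layouts are taken from
the series' uapi additions (see include/uapi/linux/vhost.h), so treat the
details as illustrative:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Create a new worker on the vhost device and attach one vring to it.
     * Returns 0 on success, -1 with errno set on failure. */
    static int vhost_bind_new_worker(int vhost_fd, unsigned int vq_index)
    {
            struct vhost_worker_state state = {};
            struct vhost_vring_worker vring_worker = {};

            if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state) < 0)
                    return -1;

            vring_worker.index = vq_index;
            vring_worker.worker_id = state.worker_id;
            return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vring_worker);
    }

For vhost-scsi this lets each IO vq get its own worker thread, which is
what lifts the ~500K IOPs ceiling described above.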
From 228a27cf78afc63a18f744a56740d26570ecaec0 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 26 Jun 2023 18:23:07 -0500
Subject: vhost: Allow worker switching while work is queueing

By using RCU for the vq based queueing paths and a mutex for the
device wide flush, this patch drops the requirement that we can only
switch workers if work has not been queued.

We can also use this to support SIGKILL properly in the future, where
we should exit almost immediately after getting that signal: when
get_signal returns true, we can set vq->worker to NULL and do a
synchronize_rcu to prevent new work from being queued to the
vhost_task that has been killed.

Signed-off-by: Mike Christie
Message-Id: <20230626232307.97930-18-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 4920ca63b8de..f1e7d4d13219 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -28,6 +28,8 @@ struct vhost_work {

 struct vhost_worker {
        struct vhost_task *vtsk;
+       /* Used to serialize device wide flushing with worker swapping. */
+       struct mutex mutex;
        struct llist_head work_list;
        u64 kcov_handle;
        u32 id;
@@ -76,7 +78,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
        struct vhost_dev *dev;
-       struct vhost_worker *worker;
+       struct vhost_worker __rcu *worker;

        /* The actual ring of buffers. */
        struct mutex mutex;
--
cgit
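With the __rcu annotation, the queueing fast path dereferences vq->worker
inside an RCU read-side section, so an attach can swap the pointer and then
synchronize_rcu() and flush, instead of requiring that no work was ever
queued. Schematically (a simplified sketch of the queueing side, not the
verbatim kernel code; vhost_worker_queue() is the assumed per-worker
primitive from the earlier sketch):

    bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
    {
            struct vhost_worker *worker;
            bool queued = false;

            rcu_read_lock();
            worker = rcu_dereference(vq->worker);
            /* A NULL worker means it is being swapped out (or its task
             * was killed); the work is simply not queued. */
            if (worker)
                    queued = vhost_worker_queue(worker, work);
            rcu_read_unlock();

            return queued;
    }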
From 9e396a2f434f829fb3b98a24bb8db5429320589d Mon Sep 17 00:00:00 2001
From: Xianting Tian
Date: Wed, 21 Jun 2023 17:38:35 +0800
Subject: vhost: Make parameter name match of vhost_get_vq_desc()

The parameter name in the function declaration and definition should
be the same.

drivers/vhost/vhost.h,
int vhost_get_vq_desc(..., unsigned int iov_count, ...);

drivers/vhost/vhost.c,
int vhost_get_vq_desc(..., unsigned int iov_size, ...)

Signed-off-by: Xianting Tian
Message-Id: <20230621093835.36878-1-xianting.tian@linux.alibaba.com>
Signed-off-by: Michael S. Tsirkin
---
 drivers/vhost/vhost.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/vhost/vhost.h')

diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f1e7d4d13219..f60d5f7bef94 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -200,7 +200,7 @@ bool vhost_log_access_ok(struct vhost_dev *);
 void vhost_clear_msg(struct vhost_dev *dev);

 int vhost_get_vq_desc(struct vhost_virtqueue *,
-                      struct iovec iov[], unsigned int iov_count,
+                      struct iovec iov[], unsigned int iov_size,
                       unsigned int *out_num, unsigned int *in_num,
                       struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
--
cgit