Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/Kconfig    114
-rw-r--r--  drivers/vhost/Makefile    11
-rw-r--r--  drivers/vhost/iotlb.c    217
-rw-r--r--  drivers/vhost/net.c     1440
-rw-r--r--  drivers/vhost/scsi.c    2835
-rw-r--r--  drivers/vhost/test.c     166
-rw-r--r--  drivers/vhost/test.h       2
-rw-r--r--  drivers/vhost/vdpa.c    1681
-rw-r--r--  drivers/vhost/vhost.c   2945
-rw-r--r--  drivers/vhost/vhost.h    360
-rw-r--r--  drivers/vhost/vringh.c   807
-rw-r--r--  drivers/vhost/vsock.c    966
12 files changed, 9287 insertions, 2257 deletions
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 017a1e8a8f6f..bc0f38574497 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,9 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VHOST_IOTLB
+ tristate
+ help
+ Generic IOTLB implementation for vhost and vringh.
+ This option is selected by any driver which needs to support
+ an IOMMU in software.
+
+config VHOST_RING
+ tristate
+ select VHOST_IOTLB
+ help
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
+
+config VHOST_TASK
+ bool
+ default n
+
+config VHOST
+ tristate
+ select VHOST_IOTLB
+ select VHOST_TASK
+ help
+ This option is selected by any driver which needs to access
+ the core of vhost.
+
+menuconfig VHOST_MENU
+ bool "VHOST drivers"
+ default y
+
+if VHOST_MENU
+
config VHOST_NET
tristate "Host kernel accelerator for virtio net"
- depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+ depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
select VHOST
- select VHOST_RING
- ---help---
+ help
This kernel module can be loaded in the host kernel to accelerate
guest networking with virtio_net. Not to be confused with the virtio_net
module itself, which needs to be loaded in the guest kernel.
@@ -13,22 +45,72 @@ config VHOST_NET
config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
- depends on TARGET_CORE && EVENTFD && m
+ depends on TARGET_CORE && EVENTFD
select VHOST
- select VHOST_RING
+ select SG_POOL
default n
- ---help---
+ help
Say M here to enable the vhost_scsi TCM fabric module
for use with virtio-scsi guests
-config VHOST_RING
- tristate
- ---help---
- This option is selected by any driver which needs to access
- the host side of a virtio ring.
+config VHOST_VSOCK
+ tristate "vhost virtio-vsock driver"
+ depends on VSOCKETS && EVENTFD
+ select VHOST
+ select VIRTIO_VSOCKETS_COMMON
+ default n
+ help
+ This kernel module can be loaded in the host kernel to provide AF_VSOCK
+ sockets for communicating with guests. The guests must have the
+ virtio_transport.ko driver loaded to use the virtio-vsock device.
-config VHOST
- tristate
- ---help---
- This option is selected by any driver which needs to access
- the core of vhost.
+ To compile this driver as a module, choose M here: the module will be called
+ vhost_vsock.
+
+config VHOST_VDPA
+ tristate "Vhost driver for vDPA-based backend"
+ depends on EVENTFD
+ select VHOST
+ select IRQ_BYPASS_MANAGER
+ depends on VDPA
+ help
+ This kernel module can be loaded in the host kernel to accelerate
+ guest virtio devices with vDPA-based backends.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vhost_vdpa.
+
+config VHOST_CROSS_ENDIAN_LEGACY
+ bool "Cross-endian support for vhost"
+ default n
+ help
+ This option allows vhost to support guests with a different byte
+ ordering from host while using legacy virtio.
+
+ Userspace programs can control the feature using the
+ VHOST_SET_VRING_ENDIAN and VHOST_GET_VRING_ENDIAN ioctls.
+
+ This is only useful on a few platforms (ppc64 and arm64). Since it
+ adds some overhead, it is disabled by default.
+
+ If unsure, say "N".
+
+config VHOST_ENABLE_FORK_OWNER_CONTROL
+ bool "Enable VHOST_ENABLE_FORK_OWNER_CONTROL"
+ default y
+ help
+ This option enables two IOCTLs: VHOST_SET_FORK_FROM_OWNER and
+ VHOST_GET_FORK_FROM_OWNER. These allow userspace applications
+ to modify the vhost worker mode for vhost devices.
+
+ This also exposes the module parameter 'fork_from_owner_default',
+ which lets users configure the default mode for vhost workers.
+
+ By default, `VHOST_ENABLE_FORK_OWNER_CONTROL` is set to `y`, so
+ users can change the worker thread mode as needed.
+ If this config is disabled (n), the related IOCTLs and parameters
+ will be unavailable.
+
+ If unsure, say "Y".
+
+endif
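As a rough userspace illustration of the two IOCTLs described in the VHOST_ENABLE_FORK_OWNER_CONTROL help text above, the sketch below toggles the worker mode on a vhost-net device. It is not part of this patch, and it assumes a uapi <linux/vhost.h> that defines VHOST_SET_FORK_FROM_OWNER / VHOST_GET_FORK_FROM_OWNER taking a __u8 mode (available only when the option is enabled):

/* Sketch only: toggle the vhost worker mode from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	__u8 mode = 1;	/* assumed: 1 = fork worker from owner, 0 = kthread mode */
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VHOST_SET_FORK_FROM_OWNER, &mode))
		perror("VHOST_SET_FORK_FROM_OWNER");
	if (!ioctl(fd, VHOST_GET_FORK_FROM_OWNER, &mode))
		printf("fork_from_owner: %u\n", mode);
	return 0;
}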
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index e0441c34db1c..f3e1897cce85 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,8 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VHOST_NET) += vhost_net.o
vhost_net-y := net.o
obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
vhost_scsi-y := scsi.o
+obj-$(CONFIG_VHOST_VSOCK) += vhost_vsock.o
+vhost_vsock-y := vsock.o
+
obj-$(CONFIG_VHOST_RING) += vringh.o
+
+obj-$(CONFIG_VHOST_VDPA) += vhost_vdpa.o
+vhost_vdpa-y := vdpa.o
+
obj-$(CONFIG_VHOST) += vhost.o
+
+obj-$(CONFIG_VHOST_IOTLB) += vhost_iotlb.o
+vhost_iotlb-y := iotlb.o
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
new file mode 100644
index 000000000000..ea61330a3431
--- /dev/null
+++ b/drivers/vhost/iotlb.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Red Hat, Inc.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ * IOTLB implementation for vhost.
+ */
+#include <linux/slab.h>
+#include <linux/vhost_iotlb.h>
+#include <linux/module.h>
+
+#define MOD_VERSION "0.1"
+#define MOD_DESC "VHOST IOTLB"
+#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define MOD_LICENSE "GPL v2"
+
+#define START(map) ((map)->start)
+#define LAST(map) ((map)->last)
+
+INTERVAL_TREE_DEFINE(struct vhost_iotlb_map,
+ rb, __u64, __subtree_last,
+ START, LAST, static inline, vhost_iotlb_itree);
+
+/**
+ * vhost_iotlb_map_free - remove a map node and free it
+ * @iotlb: the IOTLB
+ * @map: the map to be removed and freed
+ */
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map)
+{
+ vhost_iotlb_itree_remove(map, &iotlb->root);
+ list_del(&map->link);
+ kfree(map);
+ iotlb->nmaps--;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
+
+/**
+ * vhost_iotlb_add_range_ctx - add a new range to vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last of IOVA range
+ * @addr: the address that is mapped to @start
+ * @perm: access permission of this range
+ * @opaque: the opaque pointer for the new mapping
+ *
+ * Returns an error if @last is smaller than @start or if memory
+ * allocation fails
+ */
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ void *opaque)
+{
+ struct vhost_iotlb_map *map;
+
+ if (last < start)
+ return -EFAULT;
+
+ /* If the range being mapped is [0, ULONG_MAX], split it into two
+ * entries; otherwise its size would overflow u64.
+ */
+ if (start == 0 && last == ULONG_MAX) {
+ u64 mid = last / 2;
+ int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
+ perm, opaque);
+
+ if (err)
+ return err;
+
+ addr += mid + 1;
+ start = mid + 1;
+ }
+
+ if (iotlb->limit &&
+ iotlb->nmaps == iotlb->limit &&
+ iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
+ map = list_first_entry(&iotlb->list, typeof(*map), link);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+
+ map = kmalloc(sizeof(*map), GFP_ATOMIC);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = start;
+ map->size = last - start + 1;
+ map->last = last;
+ map->addr = addr;
+ map->perm = perm;
+ map->opaque = opaque;
+
+ iotlb->nmaps++;
+ vhost_iotlb_itree_insert(map, &iotlb->root);
+
+ INIT_LIST_HEAD(&map->link);
+ list_add_tail(&map->link, &iotlb->list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range_ctx);
+
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm)
+{
+ return vhost_iotlb_add_range_ctx(iotlb, start, last,
+ addr, perm, NULL);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
+
+/**
+ * vhost_iotlb_del_range - delete overlapped ranges from vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last of IOVA range
+ */
+void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ struct vhost_iotlb_map *map;
+
+ while ((map = vhost_iotlb_itree_iter_first(&iotlb->root,
+ start, last)))
+ vhost_iotlb_map_free(iotlb, map);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
+
+/**
+ * vhost_iotlb_init - initialize a vhost IOTLB
+ * @iotlb: the IOTLB that needs to be initialized
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ */
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+ unsigned int flags)
+{
+ iotlb->root = RB_ROOT_CACHED;
+ iotlb->limit = limit;
+ iotlb->nmaps = 0;
+ iotlb->flags = flags;
+ INIT_LIST_HEAD(&iotlb->list);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_init);
+
+/**
+ * vhost_iotlb_alloc - add a new vhost IOTLB
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ *
+ * Returns NULL if memory allocation fails
+ */
+struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
+{
+ struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL);
+
+ if (!iotlb)
+ return NULL;
+
+ vhost_iotlb_init(iotlb, limit, flags);
+
+ return iotlb;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_alloc);
+
+/**
+ * vhost_iotlb_reset - reset vhost IOTLB (free all IOTLB entries)
+ * @iotlb: the IOTLB to be reset
+ */
+void vhost_iotlb_reset(struct vhost_iotlb *iotlb)
+{
+ vhost_iotlb_del_range(iotlb, 0ULL, 0ULL - 1);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_reset);
+
+/**
+ * vhost_iotlb_free - reset and free vhost IOTLB
+ * @iotlb: the IOTLB to be freed
+ */
+void vhost_iotlb_free(struct vhost_iotlb *iotlb)
+{
+ if (iotlb) {
+ vhost_iotlb_reset(iotlb);
+ kfree(iotlb);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_free);
+
+/**
+ * vhost_iotlb_itree_first - return the first overlapped range
+ * @iotlb: the IOTLB
+ * @start: start of IOVA range
+ * @last: last byte in IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_first(&iotlb->root, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first);
+
+/**
+ * vhost_iotlb_itree_next - return the next overlapped range
+ * @map: the starting map node
+ * @start: start of IOVA range
+ * @last: last byte in IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_next(map, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_next);
+
+MODULE_VERSION(MOD_VERSION);
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_LICENSE(MOD_LICENSE);
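Since the new IOTLB is a self-contained API, a short sketch of how a consumer (vringh or vdpa, for instance) might drive it may help. This is illustrative only and not part of the patch; my_setup() and my_translate() are hypothetical names, and VHOST_MAP_RW is assumed to come from <linux/vhost_iotlb.h>:

/* Sketch only: create an IOTLB, add a mapping, translate an IOVA. */
#include <linux/vhost_iotlb.h>

static struct vhost_iotlb *my_setup(void)
{
	/* up to 2048 entries, no automatic retirement of old entries */
	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);

	if (!iotlb)
		return NULL;
	/* identity-map the first 1MiB of IOVA space for read/write */
	if (vhost_iotlb_add_range(iotlb, 0, 0xfffff, 0, VHOST_MAP_RW)) {
		vhost_iotlb_free(iotlb);
		return NULL;
	}
	return iotlb;
}

static u64 my_translate(struct vhost_iotlb *iotlb, u64 iova, unsigned int perm)
{
	/* first mapping overlapping the single-byte range [iova, iova] */
	struct vhost_iotlb_map *map = vhost_iotlb_itree_first(iotlb, iova, iova);

	if (!map || (map->perm & perm) != perm)
		return 0;	/* miss or insufficient permission (0 as sentinel) */
	/* map->addr corresponds to map->start, so add the offset */
	return map->addr + (iova - map->start);
}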
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 027be91db139..7f886d3dba7d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* virtio-net server in host kernel.
*/
@@ -15,22 +14,28 @@
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
-#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
+#include <linux/if_tap.h>
#include <linux/if_vlan.h>
+#include <linux/skb_array.h>
+#include <linux/skbuff.h>
#include <net/sock.h>
+#include <net/xdp.h>
#include "vhost.h"
-static int experimental_zcopytx = 1;
+static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
" 1 - Enable; 0 - Disable");
@@ -39,6 +44,12 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with small
+ * pkts.
+ */
+#define VHOST_NET_PKT_WEIGHT 256
+
/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
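For context on the two weight limits above: each handler below ends its main loop with while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len))), so a virtqueue yields once either budget is spent. A minimal sketch of such a check follows; the real implementation lives in vhost.c and reads the budgets from the device, whereas this illustrative version hardcodes the net limits:

/* Sketch only: bound the work done in one handler invocation. */
static bool example_exceeds_weight(struct vhost_virtqueue *vq,
				   int pkts, size_t total_len)
{
	if (total_len >= VHOST_NET_WEIGHT || pkts >= VHOST_NET_PKT_WEIGHT) {
		vhost_poll_queue(&vq->poll);	/* requeue; let other vqs run */
		return true;
	}
	return false;
}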
@@ -48,20 +59,29 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
* status internally; used for zerocopy tx only.
*/
/* Lower device DMA failed */
-#define VHOST_DMA_FAILED_LEN 3
+#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3)
/* Lower device DMA done */
-#define VHOST_DMA_DONE_LEN 2
+#define VHOST_DMA_DONE_LEN ((__force __virtio32)2)
/* Lower device DMA in progress */
-#define VHOST_DMA_IN_PROGRESS 1
+#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1)
/* Buffer unused */
-#define VHOST_DMA_CLEAR_LEN 0
-
-#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
+#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0)
+
+#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
+
+static const int vhost_net_bits[] = {
+ VHOST_FEATURES,
+ VHOST_NET_F_VIRTIO_NET_HDR,
+ VIRTIO_NET_F_MRG_RXBUF,
+ VIRTIO_F_ACCESS_PLATFORM,
+ VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO,
+ VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO
+};
enum {
- VHOST_NET_FEATURES = VHOST_FEATURES |
- (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF),
+ VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
enum {
@@ -71,29 +91,46 @@ enum {
};
struct vhost_net_ubuf_ref {
- struct kref kref;
+ /* refcount follows semantics similar to kref:
+ * 0: object is released
+ * 1: no outstanding ubufs
+ * >1: outstanding ubufs
+ */
+ atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
+ struct rcu_head rcu;
+};
+
+#define VHOST_NET_BATCH 64
+struct vhost_net_buf {
+ void **queue;
+ int tail;
+ int head;
};
struct vhost_net_virtqueue {
struct vhost_virtqueue vq;
- /* hdr is used to store the virtio header.
- * Since each iovec has >= 1 byte length, we never need more than
- * header length entries to store the header. */
- struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
size_t vhost_hlen;
size_t sock_hlen;
/* vhost zerocopy support fields below: */
/* last used idx for outstanding DMA zerocopy buffers */
int upend_idx;
- /* first used idx for DMA done zerocopy buffers */
+ /* For TX, first used idx for DMA done zerocopy buffers
+ * For RX, number of batched heads
+ */
int done_idx;
+ /* Number of XDP frames batched */
+ int batched_xdp;
/* an array of userspace buffers info */
- struct ubuf_info *ubuf_info;
+ struct ubuf_info_msgzc *ubuf_info;
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
+ struct ptr_ring *rx_ring;
+ struct vhost_net_buf rxq;
+ /* Batched XDP buffs */
+ struct xdp_buff *xdp;
};
struct vhost_net {
@@ -108,21 +145,92 @@ struct vhost_net {
unsigned tx_zcopy_err;
/* Flush in progress. Protected by tx vq lock. */
bool tx_flush;
+ /* Private page frag cache */
+ struct page_frag_cache pf_cache;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
-static void vhost_net_enable_zcopy(int vq)
+static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
- vhost_net_zcopy_mask |= 0x1 << vq;
+ if (rxq->tail != rxq->head)
+ return rxq->queue[rxq->head];
+ else
+ return NULL;
}
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
+static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
- struct vhost_net_ubuf_ref *ubufs;
+ return rxq->tail - rxq->head;
+}
- ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
- wake_up(&ubufs->wait);
+static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
+{
+ return rxq->tail == rxq->head;
+}
+
+static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
+{
+ void *ret = vhost_net_buf_get_ptr(rxq);
+ ++rxq->head;
+ return ret;
+}
+
+static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ rxq->head = 0;
+ rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
+ VHOST_NET_BATCH);
+ return rxq->tail;
+}
+
+static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+ ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq),
+ tun_ptr_free);
+ rxq->head = rxq->tail = 0;
+ }
+}
+
+static int vhost_net_buf_peek_len(void *ptr)
+{
+ if (tun_is_xdp_frame(ptr)) {
+ struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
+
+ return xdpf->len;
+ }
+
+ return __skb_array_len_with_tag(ptr);
+}
+
+static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (!vhost_net_buf_is_empty(rxq))
+ goto out;
+
+ if (!vhost_net_buf_produce(nvq))
+ return 0;
+
+out:
+ return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
+}
+
+static void vhost_net_buf_init(struct vhost_net_buf *rxq)
+{
+ rxq->head = rxq->tail = 0;
+}
+
+static void vhost_net_enable_zcopy(int vq)
+{
+ vhost_net_zcopy_mask |= 0x1 << vq;
}
static struct vhost_net_ubuf_ref *
@@ -135,27 +243,34 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
if (!ubufs)
return ERR_PTR(-ENOMEM);
- kref_init(&ubufs->kref);
+ atomic_set(&ubufs->refcount, 1);
init_waitqueue_head(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+ int r;
+
+ rcu_read_lock();
+ r = atomic_sub_return(1, &ubufs->refcount);
+ if (unlikely(!r))
+ wake_up(&ubufs->wait);
+ rcu_read_unlock();
+ return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
- wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+ vhost_net_ubuf_put(ubufs);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
vhost_net_ubuf_put_and_wait(ubufs);
- kfree(ubufs);
+ kfree_rcu(ubufs, rcu);
}
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
@@ -177,8 +292,10 @@ static int vhost_net_set_ubuf_info(struct vhost_net *n)
zcopy = vhost_net_zcopy_mask & (0x1 << i);
if (!zcopy)
continue;
- n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
- UIO_MAXIOV, GFP_KERNEL);
+ n->vqs[i].ubuf_info =
+ kmalloc_array(UIO_MAXIOV,
+ sizeof(*n->vqs[i].ubuf_info),
+ GFP_KERNEL);
if (!n->vqs[i].ubuf_info)
goto err;
}
@@ -201,6 +318,7 @@ static void vhost_net_vq_reset(struct vhost_net *n)
n->vqs[i].ubufs = NULL;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
+ vhost_net_buf_init(&n->vqs[i].rxq);
}
}
@@ -234,42 +352,9 @@ static bool vhost_sock_zcopy(struct socket *sock)
sock_flag(sock->sk, SOCK_ZEROCOPY);
}
-/* Pop first len bytes from iovec. Return number of segments used. */
-static int move_iovec_hdr(struct iovec *from, struct iovec *to,
- size_t len, int iov_count)
-{
- int seg = 0;
- size_t size;
-
- while (len && seg < iov_count) {
- size = min(from->iov_len, len);
- to->iov_base = from->iov_base;
- to->iov_len = size;
- from->iov_len -= size;
- from->iov_base += size;
- len -= size;
- ++from;
- ++to;
- ++seg;
- }
- return seg;
-}
-/* Copy iovec entries for len bytes from iovec. */
-static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
- size_t len, int iovcount)
+static bool vhost_sock_xdp(struct socket *sock)
{
- int seg = 0;
- size_t size;
-
- while (len && seg < iovcount) {
- size = min(from->iov_len, len);
- to->iov_base = from->iov_base;
- to->iov_len = size;
- len -= size;
- ++from;
- ++to;
- ++seg;
- }
+ return sock_flag(sock->sk, SOCK_XDP);
}
/* In case of DMA done not in order in lower device driver for some reason.
@@ -277,12 +362,12 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
* of used idx. Once lower device DMA done contiguously, we will signal KVM
* guest used idx.
*/
-static int vhost_zerocopy_signal_used(struct vhost_net *net,
- struct vhost_virtqueue *vq)
+static void vhost_zerocopy_signal_used(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
{
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
- int i;
+ int i, add;
int j = 0;
for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
@@ -290,192 +375,635 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
vhost_net_tx_err(net);
if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
- vhost_add_used_and_signal(vq->dev, vq,
- vq->heads[i].id, 0);
++j;
} else
break;
}
- if (j)
- nvq->done_idx = i;
- return j;
+ while (j) {
+ add = min(UIO_MAXIOV - nvq->done_idx, j);
+ vhost_add_used_and_signal_n(vq->dev, vq,
+ &vq->heads[nvq->done_idx],
+ NULL, add);
+ nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
+ j -= add;
+ }
}
-static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+static void vhost_zerocopy_complete(struct sk_buff *skb,
+ struct ubuf_info *ubuf_base, bool success)
{
+ struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
- int cnt = atomic_read(&ubufs->kref.refcount);
+ int cnt;
+
+ rcu_read_lock_bh();
+
+ /* set len to mark this descriptor's buffers as DMA done */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+ cnt = vhost_net_ubuf_put(ubufs);
/*
* Trigger polling thread if guest stopped submitting new buffers:
- * in this case, the refcount after decrement will eventually reach 1
- * so here it is 2.
+ * in this case, the refcount after decrement will eventually reach 1.
* We also trigger polling periodically after each 16 packets
* (the value 16 here is more or less arbitrary, it's tuned to trigger
* less than 10% of times).
*/
- if (cnt <= 2 || !(cnt % 16))
+ if (cnt <= 1 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
- /* set len to mark this desc buffers done DMA */
- vq->heads[ubuf->desc].len = success ?
- VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
+
+ rcu_read_unlock_bh();
}
-/* Expects to be always run from workqueue - which acts as
- * read-size critical section for our kind of RCU. */
-static void handle_tx(struct vhost_net *net)
+static const struct ubuf_info_ops vhost_ubuf_ops = {
+ .complete = vhost_zerocopy_complete,
+};
+
+static inline unsigned long busy_clock(void)
+{
+ return local_clock() >> 10;
+}
+
+static bool vhost_can_busy_poll(unsigned long endtime)
+{
+ return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
+ !signal_pending(current));
+}
+
+static void vhost_net_disable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ if (!vhost_vq_get_backend(vq))
+ return;
+ vhost_poll_stop(poll);
+}
+
+static int vhost_net_enable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ struct socket *sock;
+
+ sock = vhost_vq_get_backend(vq);
+ if (!sock)
+ return 0;
+
+ return vhost_poll_start(poll, sock->file);
+}
+
+static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq,
+ unsigned int count)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_dev *dev = vq->dev;
+
+ if (!nvq->done_idx)
+ return;
+
+ vhost_add_used_and_signal_n(dev, vq, vq->heads,
+ vq->nheads, count);
+ nvq->done_idx = 0;
+}
+
+static void vhost_tx_batch(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct socket *sock,
+ struct msghdr *msghdr)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ struct tun_msg_ctl ctl = {
+ .type = TUN_MSG_PTR,
+ .num = nvq->batched_xdp,
+ .ptr = nvq->xdp,
+ };
+ int i, err;
+
+ if (in_order) {
+ vq->heads[0].len = 0;
+ vq->nheads[0] = nvq->done_idx;
+ }
+
+ if (nvq->batched_xdp == 0)
+ goto signal_used;
+
+ msghdr->msg_control = &ctl;
+ msghdr->msg_controllen = sizeof(ctl);
+ err = sock->ops->sendmsg(sock, msghdr, 0);
+ if (unlikely(err < 0)) {
+ vq_err(&nvq->vq, "Fail to batch sending packets\n");
+
+ /* free the pages owned by XDP; since this is an unlikely error path,
+ * keep it simple and avoid a more complex bulk update of the
+ * used pages
+ */
+ for (i = 0; i < nvq->batched_xdp; ++i)
+ put_page(virt_to_head_page(nvq->xdp[i].data));
+ nvq->batched_xdp = 0;
+ nvq->done_idx = 0;
+ return;
+ }
+
+signal_used:
+ vhost_net_signal_used(nvq, in_order ? 1 : nvq->done_idx);
+ nvq->batched_xdp = 0;
+}
+
+static int sock_has_rx_data(struct socket *sock)
+{
+ if (unlikely(!sock))
+ return 0;
+
+ if (sock->ops->peek_len)
+ return sock->ops->peek_len(sock);
+
+ return skb_queue_empty(&sock->sk->sk_receive_queue);
+}
+
+static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
+{
+ if (!vhost_vq_avail_empty(&net->dev, vq)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ vhost_poll_queue(&vq->poll);
+ }
+}
+
+static void vhost_net_busy_poll(struct vhost_net *net,
+ struct vhost_virtqueue *rvq,
+ struct vhost_virtqueue *tvq,
+ bool *busyloop_intr,
+ bool poll_rx)
+{
+ unsigned long busyloop_timeout;
+ unsigned long endtime;
+ struct socket *sock;
+ struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
+
+ /* Try to hold the vq mutex of the paired virtqueue. We can't
+ * use mutex_lock() here since we can't guarantee a
+ * consistent lock ordering.
+ */
+ if (!mutex_trylock(&vq->mutex))
+ return;
+
+ vhost_disable_notify(&net->dev, vq);
+ sock = vhost_vq_get_backend(rvq);
+
+ busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
+ tvq->busyloop_timeout;
+
+ preempt_disable();
+ endtime = busy_clock() + busyloop_timeout;
+
+ while (vhost_can_busy_poll(endtime)) {
+ if (vhost_vq_has_work(vq)) {
+ *busyloop_intr = true;
+ break;
+ }
+
+ if ((sock_has_rx_data(sock) &&
+ !vhost_vq_avail_empty(&net->dev, rvq)) ||
+ !vhost_vq_avail_empty(&net->dev, tvq))
+ break;
+
+ cpu_relax();
+ }
+
+ preempt_enable();
+
+ if (poll_rx || sock_has_rx_data(sock))
+ vhost_net_busy_poll_try_queue(net, vq);
+ else if (!poll_rx) /* On tx here, sock has no rx data. */
+ vhost_enable_notify(&net->dev, rvq);
+
+ mutex_unlock(&vq->mutex);
+}
+
+static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
+ struct vhost_net_virtqueue *tnvq,
+ unsigned int *out_num, unsigned int *in_num,
+ struct msghdr *msghdr, bool *busyloop_intr,
+ unsigned int *ndesc)
+{
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
+
+ int r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+ out_num, in_num, NULL, NULL, ndesc);
+
+ if (r == tvq->num && tvq->busyloop_timeout) {
+ /* Flush batched packets first */
+ if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
+ vhost_tx_batch(net, tnvq,
+ vhost_vq_get_backend(tvq),
+ msghdr);
+
+ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
+
+ r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+ out_num, in_num, NULL, NULL, ndesc);
+ }
+
+ return r;
+}
+
+static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
- unsigned out, in, s;
+
+ return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
+ min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
+}
+
+static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
+ size_t hdr_size, int out)
+{
+ /* Skip header. TODO: support TSO. */
+ size_t len = iov_length(vq->iov, out);
+
+ iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
+ iov_iter_advance(iter, hdr_size);
+
+ return iov_iter_count(iter);
+}
+
+static int get_tx_bufs(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct msghdr *msg,
+ unsigned int *out, unsigned int *in,
+ size_t *len, bool *busyloop_intr,
+ unsigned int *ndesc)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ int ret;
+
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg,
+ busyloop_intr, ndesc);
+
+ if (ret < 0 || ret == vq->num)
+ return ret;
+
+ if (*in) {
+ vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n",
+ *out, *in);
+ return -EFAULT;
+ }
+
+ /* Sanity check */
+ *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
+ if (*len == 0) {
+ vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
+ *len, nvq->vhost_hlen);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
+{
+ return total_len < VHOST_NET_WEIGHT &&
+ !vhost_vq_avail_empty(vq->dev, vq);
+}
+
+#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
+
+static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
+ struct iov_iter *from)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_net *net = container_of(vq->dev, struct vhost_net,
+ dev);
+ struct socket *sock = vhost_vq_get_backend(vq);
+ struct virtio_net_hdr *gso;
+ struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
+ size_t len = iov_iter_count(from);
+ int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
+ int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
+ int sock_hlen = nvq->sock_hlen;
+ void *buf;
+ int copied;
+ int ret;
+
+ if (unlikely(len < nvq->sock_hlen))
+ return -EFAULT;
+
+ if (SKB_DATA_ALIGN(len + pad) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
+ return -ENOSPC;
+
+ buflen += SKB_DATA_ALIGN(len + pad);
+ buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+ SMP_CACHE_BYTES);
+ if (unlikely(!buf))
+ return -ENOMEM;
+
+ copied = copy_from_iter(buf + pad - sock_hlen, len, from);
+ if (copied != len) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ gso = buf + pad - sock_hlen;
+
+ if (!sock_hlen)
+ memset(buf, 0, pad);
+
+ if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ vhost16_to_cpu(vq, gso->csum_start) +
+ vhost16_to_cpu(vq, gso->csum_offset) + 2 >
+ vhost16_to_cpu(vq, gso->hdr_len)) {
+ gso->hdr_len = cpu_to_vhost16(vq,
+ vhost16_to_cpu(vq, gso->csum_start) +
+ vhost16_to_cpu(vq, gso->csum_offset) + 2);
+
+ if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ /* pad contains sock_hlen */
+ memcpy(buf, buf + pad - sock_hlen, sock_hlen);
+
+ xdp_init_buff(xdp, buflen, NULL);
+ xdp_prepare_buff(xdp, buf, pad, len - sock_hlen, true);
+
+ ++nvq->batched_xdp;
+
+ return 0;
+
+err:
+ page_frag_free(buf);
+ return ret;
+}
+
+static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned out, in;
int head;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL,
.msg_controllen = 0,
- .msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
size_t len, total_len = 0;
int err;
- size_t hdr_size;
- struct socket *sock;
- struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
- bool zcopy, zcopy_used;
-
- /* TODO: check that we are running from vhost_worker? */
- sock = rcu_dereference_check(vq->private_data, 1);
- if (!sock)
- return;
+ int sent_pkts = 0;
+ bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ unsigned int ndesc = 0;
- mutex_lock(&vq->mutex);
- vhost_disable_notify(&net->dev, vq);
+ do {
+ bool busyloop_intr = false;
- hdr_size = nvq->vhost_hlen;
- zcopy = nvq->ubufs;
-
- for (;;) {
- /* Release DMAs done buffers first */
- if (zcopy)
- vhost_zerocopy_signal_used(net, vq);
+ if (nvq->done_idx == VHOST_NET_BATCH)
+ vhost_tx_batch(net, nvq, sock, &msg);
- head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
- ARRAY_SIZE(vq->iov),
- &out, &in,
- NULL, NULL);
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr, &ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- int num_pends;
-
- /* If more outstanding DMAs, queue the work.
- * Handle upend_idx wrap around
+ /* Flush batched packets to handle pending RX
+ * work (if busyloop_intr is set) and to avoid
+ * unnecessary virtqueue kicks.
*/
- num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
- (nvq->upend_idx - nvq->done_idx) :
- (nvq->upend_idx + UIO_MAXIOV -
- nvq->done_idx);
- if (unlikely(num_pends > VHOST_MAX_PEND))
- break;
- if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_tx_batch(net, nvq, sock, &msg);
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev,
+ vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
}
break;
}
- if (in) {
- vq_err(vq, "Unexpected descriptor format for TX: "
- "out %d, int %d\n", out, in);
- break;
+
+ total_len += len;
+
+ /* For simplicity, TX batching is only enabled if
+ * sndbuf is unlimited.
+ */
+ if (sock_can_batch) {
+ err = vhost_net_build_xdp(nvq, &msg.msg_iter);
+ if (!err) {
+ goto done;
+ } else if (unlikely(err != -ENOSPC)) {
+ vhost_tx_batch(net, nvq, sock, &msg);
+ vhost_discard_vq_desc(vq, 1, ndesc);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+
+ if (nvq->batched_xdp) {
+ /* We can't build XDP buff, go for single
+ * packet path but let's flush batched
+ * packets.
+ */
+ vhost_tx_batch(net, nvq, sock, &msg);
+ }
+ msg.msg_control = NULL;
+ } else {
+ if (tx_can_batch(vq, total_len))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags &= ~MSG_MORE;
+ }
+
+ err = sock->ops->sendmsg(sock, &msg, len);
+ if (unlikely(err < 0)) {
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1, ndesc);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
+ pr_debug("Truncated TX packet: len %d != %zd\n",
+ err, len);
+done:
+ if (in_order) {
+ vq->heads[0].id = cpu_to_vhost32(vq, head);
+ } else {
+ vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->done_idx].len = 0;
}
- /* Skip header. TODO: support TSO. */
- s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
- msg.msg_iovlen = out;
- len = iov_length(vq->iov, out);
- /* Sanity check */
- if (!len) {
- vq_err(vq, "Unexpected header len for TX: "
- "%zd expected %zd\n",
- iov_length(nvq->hdr, s), hdr_size);
+ ++nvq->done_idx;
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
+
+ vhost_tx_batch(net, nvq, sock, &msg);
+}
+
+static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned out, in;
+ int head;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ struct tun_msg_ctl ctl;
+ size_t len, total_len = 0;
+ int err;
+ struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info_msgzc *ubuf;
+ unsigned int ndesc = 0;
+ bool zcopy_used;
+ int sent_pkts = 0;
+
+ do {
+ bool busyloop_intr;
+
+ /* Release DMAs done buffers first */
+ vhost_zerocopy_signal_used(net, vq);
+
+ busyloop_intr = false;
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr, &ndesc);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
break;
}
- zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
- nvq->upend_idx != nvq->done_idx);
+
+ zcopy_used = len >= VHOST_GOODCOPY_LEN
+ && !vhost_exceeds_maxpend(net)
+ && vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- vq->heads[nvq->upend_idx].id = head;
- if (!vhost_net_tx_select_zcopy(net) ||
- len < VHOST_GOODCOPY_LEN) {
- /* copy don't need to wait for DMA done */
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_DONE_LEN;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- ubufs = NULL;
- } else {
- struct ubuf_info *ubuf;
- ubuf = nvq->ubuf_info + nvq->upend_idx;
-
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_IN_PROGRESS;
- ubuf->callback = vhost_zerocopy_callback;
- ubuf->ctx = nvq->ubufs;
- ubuf->desc = nvq->upend_idx;
- msg.msg_control = ubuf;
- msg.msg_controllen = sizeof(ubuf);
- ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
- }
+ ubuf = nvq->ubuf_info + nvq->upend_idx;
+ vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
+ ubuf->ctx = nvq->ubufs;
+ ubuf->desc = nvq->upend_idx;
+ ubuf->ubuf.ops = &vhost_ubuf_ops;
+ ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
+ refcount_set(&ubuf->ubuf.refcnt, 1);
+ msg.msg_control = &ctl;
+ ctl.type = TUN_MSG_UBUF;
+ ctl.ptr = &ubuf->ubuf;
+ msg.msg_controllen = sizeof(ctl);
+ ubufs = nvq->ubufs;
+ atomic_inc(&ubufs->refcount);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
- } else
+ } else {
msg.msg_control = NULL;
- /* TODO: Check specific error and bomb out unless ENOBUFS? */
- err = sock->ops->sendmsg(NULL, sock, &msg, len);
+ ubufs = NULL;
+ }
+ total_len += len;
+ if (tx_can_batch(vq, total_len) &&
+ likely(!vhost_exceeds_maxpend(net))) {
+ msg.msg_flags |= MSG_MORE;
+ } else {
+ msg.msg_flags &= ~MSG_MORE;
+ }
+
+ err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
+ bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
+
if (zcopy_used) {
- if (ubufs)
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
vhost_net_ubuf_put(ubufs);
- nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
- % UIO_MAXIOV;
+ if (retry)
+ nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
+ % UIO_MAXIOV;
+ else
+ vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
- vhost_discard_vq_desc(vq, 1);
- break;
- }
- if (err != len)
+ if (retry) {
+ vhost_discard_vq_desc(vq, 1, ndesc);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
pr_debug("Truncated TX packet: "
" len %d != %zd\n", err, len);
if (!zcopy_used)
vhost_add_used_and_signal(&net->dev, vq, head, 0);
else
vhost_zerocopy_signal_used(net, vq);
- total_len += len;
vhost_net_tx_packet(net);
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
+}
+/* Expects to be always run from workqueue - which acts as
* read-side critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct socket *sock;
+
+ mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
+ sock = vhost_vq_get_backend(vq);
+ if (!sock)
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
+ vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
+
+ if (vhost_sock_zcopy(sock))
+ handle_tx_zerocopy(net, sock);
+ else
+ handle_tx_copy(net, sock);
+
+out:
mutex_unlock(&vq->mutex);
}
-static int peek_head_len(struct sock *sk)
+static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
struct sk_buff *head;
int len = 0;
unsigned long flags;
+ if (rvq->rx_ring)
+ return vhost_net_buf_peek(rvq);
+
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
head = skb_peek(&sk->sk_receive_queue);
if (likely(head)) {
len = head->len;
- if (vlan_tx_tag_present(head))
+ if (skb_vlan_tag_present(head))
len += VLAN_HLEN;
}
@@ -483,9 +1011,31 @@ static int peek_head_len(struct sock *sk)
return len;
}
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
+ bool *busyloop_intr, unsigned int *count)
+{
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
+ int len = peek_head_len(rnvq, sk);
+
+ if (!len && rvq->busyloop_timeout) {
+ /* Flush batched heads first */
+ vhost_net_signal_used(rnvq, *count);
+ *count = 0;
+ /* Both tx vq and rx socket were polled here */
+ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
+
+ len = peek_head_len(rnvq, sk);
+ }
+
+ return len;
+}
+
/* This is a multi-buffer version of vhost_get_desc that works if
* vq has read descriptors only.
- * @vq - the relevant virtqueue
+ * @nvq - the relevant vhost_net virtqueue
* @datalen - data length we'll be reading
* @iovcount - returned count of io vectors we fill
* @log - vhost log
@@ -493,28 +1043,40 @@ static int peek_head_len(struct sock *sk)
* @quota - headcount quota, 1 for big buffer
* returns number of buffer heads allocated, negative on error
*/
-static int get_rx_bufs(struct vhost_virtqueue *vq,
+static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
struct vring_used_elem *heads,
+ u16 *nheads,
int datalen,
unsigned *iovcount,
struct vhost_log *log,
unsigned *log_num,
- unsigned int quota)
+ unsigned int quota,
+ unsigned int *ndesc)
{
- unsigned int out, in;
+ struct vhost_virtqueue *vq = &nvq->vq;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ unsigned int out, in, desc_num, n = 0;
int seg = 0;
int headcount = 0;
unsigned d;
int r, nlogs = 0;
+ /* len is always initialized before use since we are always called with
+ * datalen > 0.
+ */
+ u32 len;
while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
r = -ENOBUFS;
goto err;
}
- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
- ARRAY_SIZE(vq->iov) - seg, &out,
- &in, log, log_num);
+ r = vhost_get_vq_desc_n(vq, vq->iov + seg,
+ ARRAY_SIZE(vq->iov) - seg, &out,
+ &in, log, log_num, &desc_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
if (d == vq->num) {
r = 0;
goto err;
@@ -529,19 +1091,40 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
nlogs += *log_num;
log += *log_num;
}
- heads[headcount].id = d;
- heads[headcount].len = iov_length(vq->iov + seg, in);
- datalen -= heads[headcount].len;
+ len = iov_length(vq->iov + seg, in);
+ if (!in_order) {
+ heads[headcount].id = cpu_to_vhost32(vq, d);
+ heads[headcount].len = cpu_to_vhost32(vq, len);
+ }
++headcount;
+ datalen -= len;
seg += in;
+ n += desc_num;
}
- heads[headcount - 1].len += datalen;
+
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
+
+ if (!in_order)
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
+ else {
+ heads[0].len = cpu_to_vhost32(vq, len + datalen);
+ heads[0].id = cpu_to_vhost32(vq, d);
+ nheads[0] = headcount;
+ }
+
+ *ndesc = n;
+
return headcount;
err:
- vhost_discard_vq_desc(vq, headcount);
+ vhost_discard_vq_desc(vq, headcount, n);
return r;
}
@@ -551,52 +1134,74 @@ static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
- unsigned uninitialized_var(in), log;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ unsigned int count = 0;
+ unsigned in, log;
struct vhost_log *vq_log;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL, /* FIXME: get and handle RX aux data. */
.msg_controllen = 0,
- .msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
- struct virtio_net_hdr_mrg_rxbuf hdr = {
- .hdr.flags = 0,
- .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+ struct virtio_net_hdr hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
int err, mergeable;
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
- /* TODO: check that we are running from vhost_worker? */
- struct socket *sock = rcu_dereference_check(vq->private_data, 1);
+ bool busyloop_intr = false;
+ bool set_num_buffers;
+ struct socket *sock;
+ struct iov_iter fixup;
+ __virtio16 num_buffers;
+ int recv_pkts = 0;
+ unsigned int ndesc;
+ mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
+ sock = vhost_vq_get_backend(vq);
if (!sock)
- return;
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
- mutex_lock(&vq->mutex);
vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
+
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
- vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
- mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
-
- while ((sock_len = peek_head_len(sock->sk))) {
+ mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
+ set_num_buffers = mergeable ||
+ vhost_has_feature(vq, VIRTIO_F_VERSION_1);
+
+ do {
+ sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr, &count);
+ if (!sock_len)
+ break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
- headcount = get_rx_bufs(vq, vq->heads, vhost_len,
- &in, vq_log, &log,
- likely(mergeable) ? UIO_MAXIOV : 1);
+ headcount = get_rx_bufs(nvq, vq->heads + count,
+ vq->nheads + count,
+ vhost_len, &in, vq_log, &log,
+ likely(mergeable) ? UIO_MAXIOV : 1,
+ &ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
- break;
+ goto out;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
- if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
vhost_disable_notify(&net->dev, vq);
@@ -604,18 +1209,29 @@ static void handle_rx(struct vhost_net *net)
}
/* Nothing new? Wait for eventfd to tell us
* they refilled. */
- break;
+ goto out;
+ }
+ busyloop_intr = false;
+ if (nvq->rx_ring)
+ msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
+ err = sock->ops->recvmsg(sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
}
/* We don't need to be notified again. */
- if (unlikely((vhost_hlen)))
- /* Skip header. TODO: support TSO. */
- move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
- else
- /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
- * needed because recvmsg can modify msg_iov. */
- copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
- msg.msg_iovlen = in;
- err = sock->ops->recvmsg(NULL, sock, &msg,
+ iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
+ fixup = msg.msg_iter;
+ if (unlikely((vhost_hlen))) {
+ /* We will supply the header ourselves
+ * TODO: support TSO.
+ */
+ iov_iter_advance(&msg.msg_iter, vhost_hlen);
+ }
+ err = sock->ops->recvmsg(sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
/* Userspace might have consumed the packet meanwhile:
* it's not supposed to do this usually, but might be hard
@@ -623,36 +1239,51 @@ static void handle_rx(struct vhost_net *net)
if (unlikely(err != sock_len)) {
pr_debug("Discarded rx packet: "
" len %d, expected %zd\n", err, sock_len);
- vhost_discard_vq_desc(vq, headcount);
+ vhost_discard_vq_desc(vq, headcount, ndesc);
continue;
}
- if (unlikely(vhost_hlen) &&
- memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
- vhost_hlen)) {
- vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
- vq->iov->iov_base);
- break;
+ /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
+ if (unlikely(vhost_hlen)) {
+ if (copy_to_iter(&hdr, sizeof(hdr),
+ &fixup) != sizeof(hdr)) {
+ vq_err(vq, "Unable to write vnet_hdr "
+ "at addr %p\n", vq->iov->iov_base);
+ goto out;
+ }
+ } else {
+ /* Header came from socket; we'll need to patch
+ * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+ */
+ iov_iter_advance(&fixup, sizeof(hdr));
}
/* TODO: Should check and handle checksum. */
- if (likely(mergeable) &&
- memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
- offsetof(typeof(hdr), num_buffers),
- sizeof hdr.num_buffers)) {
+
+ num_buffers = cpu_to_vhost16(vq, headcount);
+ if (likely(set_num_buffers) &&
+ copy_to_iter(&num_buffers, sizeof num_buffers,
+ &fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
- vhost_discard_vq_desc(vq, headcount);
- break;
+ vhost_discard_vq_desc(vq, headcount, ndesc);
+ goto out;
+ }
+ nvq->done_idx += headcount;
+ count += in_order ? 1 : headcount;
+ if (nvq->done_idx > VHOST_NET_BATCH) {
+ vhost_net_signal_used(nvq, count);
+ count = 0;
}
- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
- headcount);
if (unlikely(vq_log))
- vhost_log_write(vq, vq_log, log, vhost_len);
+ vhost_log_write(vq, vq_log, log, vhost_len,
+ vq->iov, in);
total_len += vhost_len;
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+ if (unlikely(busyloop_intr))
+ vhost_poll_queue(&vq->poll);
+ else if (!sock_len)
+ vhost_net_enable_vq(net, vq);
+out:
+ vhost_net_signal_used(nvq, count);
mutex_unlock(&vq->mutex);
}
@@ -690,18 +1321,39 @@ static void handle_rx_net(struct vhost_work *work)
static int vhost_net_open(struct inode *inode, struct file *f)
{
- struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+ struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r, i;
+ void **queue;
+ struct xdp_buff *xdp;
+ int i;
+ n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!n)
return -ENOMEM;
- vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+ vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
- kfree(n);
+ kvfree(n);
+ return -ENOMEM;
+ }
+
+ queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
+ GFP_KERNEL);
+ if (!queue) {
+ kfree(vqs);
+ kvfree(n);
return -ENOMEM;
}
+ n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
+
+ xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
+ if (!xdp) {
+ kfree(vqs);
+ kvfree(n);
+ kfree(queue);
+ return -ENOMEM;
+ }
+ n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
dev = &n->dev;
vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
@@ -713,61 +1365,41 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].ubuf_info = NULL;
n->vqs[i].upend_idx = 0;
n->vqs[i].done_idx = 0;
+ n->vqs[i].batched_xdp = 0;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
+ n->vqs[i].rx_ring = NULL;
+ vhost_net_buf_init(&n->vqs[i].rxq);
}
- r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
- if (r < 0) {
- kfree(n);
- kfree(vqs);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+ UIO_MAXIOV + VHOST_NET_BATCH,
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
+ NULL);
- vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
- vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
+ vqs[VHOST_NET_VQ_TX]);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
+ vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
+ page_frag_cache_init(&n->pf_cache);
return 0;
}
-static void vhost_net_disable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- if (!vq->private_data)
- return;
- vhost_poll_stop(poll);
-}
-
-static int vhost_net_enable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- struct socket *sock;
-
- sock = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
- if (!sock)
- return 0;
-
- return vhost_poll_start(poll, sock->file);
-}
-
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
struct socket *sock;
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex);
- sock = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
+ sock = vhost_vq_get_backend(vq);
vhost_net_disable_vq(n, vq);
- rcu_assign_pointer(vq->private_data, NULL);
+ vhost_vq_set_backend(vq, NULL);
+ vhost_net_buf_unproduce(nvq);
+ nvq->rx_ring = NULL;
mutex_unlock(&vq->mutex);
return sock;
}
@@ -779,16 +1411,9 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
-static void vhost_net_flush_vq(struct vhost_net *n, int index)
-{
- vhost_poll_flush(n->poll + index);
- vhost_poll_flush(&n->vqs[index].vq.poll);
-}
-
static void vhost_net_flush(struct vhost_net *n)
{
- vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
- vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+ vhost_dev_flush(&n->dev);
if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = true;
@@ -797,7 +1422,7 @@ static void vhost_net_flush(struct vhost_net *n)
vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
- kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+ atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
@@ -811,27 +1436,28 @@ static int vhost_net_release(struct inode *inode, struct file *f)
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
vhost_dev_stop(&n->dev);
- vhost_dev_cleanup(&n->dev, false);
+ vhost_dev_cleanup(&n->dev);
vhost_net_vq_reset(n);
if (tx_sock)
- fput(tx_sock->file);
+ sockfd_put(tx_sock);
if (rx_sock)
- fput(rx_sock->file);
+ sockfd_put(rx_sock);
+ /* Make sure no callbacks are outstanding */
+ synchronize_rcu();
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
+ kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
+ kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
- kfree(n);
+ page_frag_cache_drain(&n->pf_cache);
+ kvfree(n);
return 0;
}
static struct socket *get_raw_socket(int fd)
{
- struct {
- struct sockaddr_ll sa;
- char buf[MAX_ADDR_LEN];
- } uaddr;
- int uaddr_len = sizeof uaddr, r;
+ int r;
struct socket *sock = sockfd_lookup(fd, &r);
if (!sock)
@@ -843,21 +1469,30 @@ static struct socket *get_raw_socket(int fd)
goto err;
}
- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
- &uaddr_len, 0);
- if (r)
- goto err;
-
- if (uaddr.sa.sll_family != AF_PACKET) {
+ if (sock->sk->sk_family != AF_PACKET) {
r = -EPFNOSUPPORT;
goto err;
}
return sock;
err:
- fput(sock->file);
+ sockfd_put(sock);
return ERR_PTR(r);
}
+static struct ptr_ring *get_tap_ptr_ring(struct file *file)
+{
+ struct ptr_ring *ring;
+ ring = tun_get_tx_ring(file);
+ if (!IS_ERR(ring))
+ goto out;
+ ring = tap_get_ptr_ring(file);
+ if (!IS_ERR(ring))
+ goto out;
+ ring = NULL;
+out:
+ return ring;
+}
+
static struct socket *get_tap_socket(int fd)
{
struct file *file = fget(fd);
@@ -868,7 +1503,7 @@ static struct socket *get_tap_socket(int fd)
sock = tun_get_socket(file);
if (!IS_ERR(sock))
return sock;
- sock = macvtap_get_socket(file);
+ sock = tap_get_socket(file);
if (IS_ERR(sock))
fput(file);
return sock;
@@ -911,6 +1546,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
nvq = &n->vqs[index];
mutex_lock(&vq->mutex);
+ if (fd == -1)
+ vhost_clear_msg(&n->dev);
+
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(vq)) {
r = -EFAULT;
@@ -923,8 +1561,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
/* start polling new socket */
- oldsock = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
+ oldsock = vhost_vq_get_backend(vq);
if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock));
@@ -934,13 +1571,20 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
vhost_net_disable_vq(n, vq);
- rcu_assign_pointer(vq->private_data, sock);
- r = vhost_init_used(vq);
+ vhost_vq_set_backend(vq, sock);
+ vhost_net_buf_unproduce(nvq);
+ r = vhost_vq_init_access(vq);
if (r)
goto err_used;
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
+ if (index == VHOST_NET_VQ_RX) {
+ if (sock)
+ nvq->rx_ring = get_tap_ptr_ring(sock->file);
+ else
+ nvq->rx_ring = NULL;
+ }
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
@@ -960,20 +1604,21 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
if (oldsock) {
- vhost_net_flush_vq(n, index);
- fput(oldsock->file);
+ vhost_dev_flush(&n->dev);
+ sockfd_put(oldsock);
}
mutex_unlock(&n->dev.mutex);
return 0;
err_used:
- rcu_assign_pointer(vq->private_data, oldsock);
+ vhost_vq_set_backend(vq, oldsock);
vhost_net_enable_vq(n, vq);
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
- fput(sock->file);
+ if (sock)
+ sockfd_put(sock);
err_vq:
mutex_unlock(&vq->mutex);
err:
@@ -986,39 +1631,48 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
- struct vhost_memory *memory;
+ struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
- memory = vhost_dev_reset_owner_prepare();
- if (!memory) {
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
- vhost_dev_reset_owner(&n->dev, memory);
+ vhost_dev_stop(&n->dev);
+ vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
mutex_unlock(&n->dev.mutex);
if (tx_sock)
- fput(tx_sock->file);
+ sockfd_put(tx_sock);
if (rx_sock)
- fput(rx_sock->file);
+ sockfd_put(rx_sock);
return err;
}
-static int vhost_net_set_features(struct vhost_net *n, u64 features)
+static int vhost_net_set_features(struct vhost_net *n, const u64 *features)
{
size_t vhost_hlen, sock_hlen, hdr_len;
int i;
- hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
- sizeof(struct virtio_net_hdr_mrg_rxbuf) :
- sizeof(struct virtio_net_hdr);
- if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
+ hdr_len = virtio_features_test_bit(features, VIRTIO_NET_F_MRG_RXBUF) ||
+ virtio_features_test_bit(features, VIRTIO_F_VERSION_1) ?
+ sizeof(struct virtio_net_hdr_mrg_rxbuf) :
+ sizeof(struct virtio_net_hdr);
+
+ if (virtio_features_test_bit(features,
+ VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO) ||
+ virtio_features_test_bit(features,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO))
+ hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel);
+
+ if (virtio_features_test_bit(features, VHOST_NET_F_VIRTIO_NET_HDR)) {
/* vhost provides vnet_hdr */
vhost_hlen = hdr_len;
sock_hlen = 0;
@@ -1028,22 +1682,29 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
sock_hlen = hdr_len;
}
mutex_lock(&n->dev.mutex);
- if ((features & (1 << VHOST_F_LOG_ALL)) &&
- !vhost_log_access_ok(&n->dev)) {
- mutex_unlock(&n->dev.mutex);
- return -EFAULT;
+ if (virtio_features_test_bit(features, VHOST_F_LOG_ALL) &&
+ !vhost_log_access_ok(&n->dev))
+ goto out_unlock;
+
+ if (virtio_features_test_bit(features, VIRTIO_F_ACCESS_PLATFORM)) {
+ if (vhost_init_device_iotlb(&n->dev))
+ goto out_unlock;
}
- n->dev.acked_features = features;
- smp_wmb();
+
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
mutex_lock(&n->vqs[i].vq.mutex);
+ virtio_features_copy(n->vqs[i].vq.acked_features_array,
+ features);
n->vqs[i].vhost_hlen = vhost_hlen;
n->vqs[i].sock_hlen = sock_hlen;
mutex_unlock(&n->vqs[i].vq.mutex);
}
- vhost_net_flush(n);
mutex_unlock(&n->dev.mutex);
return 0;
+
+out_unlock:
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
}
static long vhost_net_set_owner(struct vhost_net *n)
@@ -1070,12 +1731,14 @@ out:
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
+ const DEFINE_VHOST_FEATURES_ARRAY(vhost_net_features, vhost_net_bits);
+ u64 all_features[VIRTIO_FEATURES_U64S];
struct vhost_net *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
struct vhost_vring_file backend;
- u64 features;
- int r;
+ u64 features, count, copied;
+ int r, i;
switch (ioctl) {
case VHOST_NET_SET_BACKEND:
@@ -1083,16 +1746,72 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
return vhost_net_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
- features = VHOST_NET_FEATURES;
+ features = vhost_net_features[0];
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
- if (features & ~VHOST_NET_FEATURES)
+ if (features & ~vhost_net_features[0])
return -EOPNOTSUPP;
- return vhost_net_set_features(n, features);
+
+ virtio_features_from_u64(all_features, features);
+ return vhost_net_set_features(n, all_features);
+ case VHOST_GET_FEATURES_ARRAY:
+ if (copy_from_user(&count, featurep, sizeof(count)))
+ return -EFAULT;
+
+ /* Copy the net features, up to the user-provided buffer size */
+ argp += sizeof(u64);
+ copied = min(count, (u64)VIRTIO_FEATURES_U64S);
+ if (copy_to_user(argp, vhost_net_features,
+ copied * sizeof(u64)))
+ return -EFAULT;
+
+ /* Zero the trailing space provided by user-space, if any */
+ argp += copied * sizeof(u64);
+ if (clear_user(argp, size_mul(count - copied, sizeof(u64))))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES_ARRAY:
+ if (copy_from_user(&count, featurep, sizeof(count)))
+ return -EFAULT;
+
+ virtio_features_zero(all_features);
+ argp += sizeof(u64);
+ copied = min(count, (u64)VIRTIO_FEATURES_U64S);
+ if (copy_from_user(all_features, argp, copied * sizeof(u64)))
+ return -EFAULT;
+
+ /*
+ * Any feature specified by user-space above
+ * VIRTIO_FEATURES_BITS is not supported by definition.
+ */
+ for (i = copied; i < count; ++i) {
+ if (copy_from_user(&features, featurep + 1 + i,
+ sizeof(features)))
+ return -EFAULT;
+ if (features)
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < VIRTIO_FEATURES_U64S; i++)
+ if (all_features[i] & ~vhost_net_features[i])
+ return -EOPNOTSUPP;
+
+ return vhost_net_set_features(n, all_features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_NET_BACKEND_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_NET_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&n->dev, features);
+ return 0;
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
case VHOST_SET_OWNER:
@@ -1109,21 +1828,42 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
+static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
{
- return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_poll(file, dev, wait);
}
-#endif
static const struct file_operations vhost_net_fops = {
.owner = THIS_MODULE,
.release = vhost_net_release,
+ .read_iter = vhost_net_chr_read_iter,
+ .write_iter = vhost_net_chr_write_iter,
+ .poll = vhost_net_chr_poll,
.unlocked_ioctl = vhost_net_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_net_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_net_open,
.llseek = noop_llseek,
};
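
For reference, the features-array handshake added above can be driven from userspace roughly as follows. This is a hypothetical sketch, not part of the patch: the buffer layout (a leading u64 count followed by count u64 feature words) is inferred from the handler, FEATURE_WORDS stands in for the kernel's VIRTIO_FEATURES_U64S, and it assumes a linux/vhost.h that already defines VHOST_GET_FEATURES_ARRAY.

/* Hypothetical userspace sketch; layout inferred from the handler above. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#define FEATURE_WORDS 2	/* stand-in for the kernel's VIRTIO_FEATURES_U64S */

int main(void)
{
	struct {
		uint64_t count;				/* in: words the buffer can hold */
		uint64_t features[FEATURE_WORDS];	/* out: feature bits */
	} buf;
	int i, fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0 || ioctl(fd, VHOST_SET_OWNER, NULL))
		return 1;

	memset(&buf, 0, sizeof(buf));
	buf.count = FEATURE_WORDS;
	if (ioctl(fd, VHOST_GET_FEATURES_ARRAY, &buf))
		return 1;

	for (i = 0; i < FEATURE_WORDS; i++)
		printf("feature word %d: 0x%016llx\n", i,
		       (unsigned long long)buf.features[i]);
	return 0;
}
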
@@ -1134,7 +1874,7 @@ static struct miscdevice vhost_net_misc = {
.fops = &vhost_net_fops,
};
-static int vhost_net_init(void)
+static int __init vhost_net_init(void)
{
if (experimental_zcopytx)
vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
@@ -1142,7 +1882,7 @@ static int vhost_net_init(void)
}
module_init(vhost_net_init);
-static void vhost_net_exit(void)
+static void __exit vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 06adf31a9248..f43c1fe9fad9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1,24 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
* Vhost kernel TCM fabric driver for virtio SCSI initiators
*
- * (C) Copyright 2010-2012 RisingTide Systems LLC.
+ * (C) Copyright 2010-2013 Datera, Inc.
* (C) Copyright 2010-2012 IBM Corp.
*
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
- *
- * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ * Authors: Nicholas A. Bellinger <nab@daterainc.com>
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
****************************************************************************/
#include <linux/module.h>
@@ -35,15 +23,15 @@
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
+#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
+#include <linux/blk_types.h>
+#include <linux/bio.h>
+#include <linux/unaligned.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
@@ -51,9 +39,67 @@
#include "vhost.h"
-#define TCM_VHOST_VERSION "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
+#define VHOST_SCSI_VERSION "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+/*
+ * For the legacy descriptor case we allocate an iov per byte in the
+ * virtio_scsi_cmd_resp struct.
+ */
+#define VHOST_SCSI_MAX_RESP_IOVS sizeof(struct virtio_scsi_cmd_resp)
+
+static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS;
+
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+static int vhost_scsi_set_inline_sg_cnt(const char *buf,
+ const struct kernel_param *kp)
+{
+ pr_err("Setting inline_sg_cnt is not supported.\n");
+ return -EOPNOTSUPP;
+}
+#else
+static int vhost_scsi_set_inline_sg_cnt(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int cnt;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &cnt);
+ if (ret)
+ return ret;
+
+ if (cnt > VHOST_SCSI_PREALLOC_SGLS) {
+ pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS);
+ return -EINVAL;
+ }
+
+ vhost_scsi_inline_sg_cnt = cnt;
+ return 0;
+}
+#endif
+
+static int vhost_scsi_get_inline_sg_cnt(char *buf,
+ const struct kernel_param *kp)
+{
+ return sprintf(buf, "%u\n", vhost_scsi_inline_sg_cnt);
+}
+
+static const struct kernel_param_ops vhost_scsi_inline_sg_cnt_op = {
+ .get = vhost_scsi_get_inline_sg_cnt,
+ .set = vhost_scsi_set_inline_sg_cnt,
+};
+
+module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644);
+MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048.");
+
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * requests.
+ */
+#define VHOST_SCSI_WEIGHT 256
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
@@ -62,92 +108,82 @@ struct vhost_scsi_inflight {
struct kref kref;
};
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
int tvc_vq_desc;
- /* virtio-scsi initiator task attribute */
- int tvc_task_attr;
- /* virtio-scsi initiator data direction */
- enum dma_data_direction tvc_data_direction;
- /* Expected data transfer length from virtio-scsi header */
- u32 tvc_exp_data_len;
- /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
- u64 tvc_tag;
/* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count;
- /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
- u32 tvc_lun;
- /* Pointer to the SGL formatted memory from virtio-scsi */
- struct scatterlist *tvc_sgl;
- /* Pointer to response */
- struct virtio_scsi_cmd_resp __user *tvc_resp;
- /* Pointer to vhost_scsi for our device */
- struct vhost_scsi *tvc_vhost;
+ u32 tvc_prot_sgl_count;
+ u32 copied_iov:1;
+ const void *read_iov;
+ struct iov_iter *read_iter;
+ struct scatterlist *sgl;
+ struct sg_table table;
+ struct scatterlist *prot_sgl;
+ struct sg_table prot_table;
+ /* Fast path response header iovec used when only one vec is needed */
+ struct iovec tvc_resp_iov;
+ /* Number of iovs for response */
+ unsigned int tvc_resp_iovs_cnt;
+ /* Pointer to response header iovecs if more than one is needed */
+ struct iovec *tvc_resp_iovs;
/* Pointer to vhost_virtqueue for the cmd */
struct vhost_virtqueue *tvc_vq;
- /* Pointer to vhost nexus memory */
- struct tcm_vhost_nexus *tvc_nexus;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tvc_se_cmd;
- /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
- struct work_struct work;
- /* Copy of the incoming SCSI command descriptor block (CDB) */
- unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+ /*
+ * Dirty write descriptors of this command.
+ */
+ struct vhost_log *tvc_log;
+ unsigned int tvc_log_num;
/* Completed commands list, serviced from vhost worker thread */
struct llist_node tvc_completion_list;
/* Used to track inflight cmd */
struct vhost_scsi_inflight *inflight;
};
-struct tcm_vhost_nexus {
+struct vhost_scsi_nexus {
/* Pointer to TCM session for I_T Nexus */
struct se_session *tvn_se_sess;
};
-struct tcm_vhost_nacl {
- /* Binary World Wide unique Port Name for Vhost Initiator port */
- u64 iport_wwpn;
- /* ASCII formatted WWPN for Sas Initiator port */
- char iport_name[TCM_VHOST_NAMELEN];
- /* Returned by tcm_vhost_make_nodeacl() */
- struct se_node_acl se_node_acl;
-};
-
-struct tcm_vhost_tpg {
+struct vhost_scsi_tpg {
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt;
- /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
+ /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count;
- /* list for tcm_vhost_list */
+ /* Used for enabling T10-PI with legacy devices */
+ int tv_fabric_prot_type;
+ /* list for vhost_scsi_list */
struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */
struct mutex tv_tpg_mutex;
/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
- struct tcm_vhost_nexus *tpg_nexus;
- /* Pointer back to tcm_vhost_tport */
- struct tcm_vhost_tport *tport;
- /* Returned by tcm_vhost_make_tpg() */
+ struct vhost_scsi_nexus *tpg_nexus;
+ /* Pointer back to vhost_scsi_tport */
+ struct vhost_scsi_tport *tport;
+ /* Returned by vhost_scsi_make_tpg() */
struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi;
};
-struct tcm_vhost_tport {
+struct vhost_scsi_tport {
/* SCSI protocol the tport is providing */
u8 tport_proto_id;
/* Binary World Wide unique Port Name for Vhost Target port */
u64 tport_wwpn;
/* ASCII formatted WWPN for Vhost Target port */
- char tport_name[TCM_VHOST_NAMELEN];
- /* Returned by tcm_vhost_make_tport() */
+ char tport_name[VHOST_SCSI_NAMELEN];
+ /* Returned by vhost_scsi_make_tport() */
struct se_wwn tport_wwn;
};
-struct tcm_vhost_evt {
+struct vhost_scsi_evt {
/* event to be sent to guest */
struct virtio_scsi_event event;
/* event list, serviced from vhost worker thread */
@@ -160,16 +196,26 @@ enum {
VHOST_SCSI_VQ_IO = 2,
};
-enum {
- VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
+/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
+static const int vhost_scsi_bits[] = {
+ VHOST_FEATURES,
+ VIRTIO_SCSI_F_HOTPLUG,
+ VIRTIO_SCSI_F_T10_PI
};
+#define VHOST_SCSI_FEATURES VHOST_FEATURES_U64(vhost_scsi_bits, 0)
+
#define VHOST_SCSI_MAX_TARGET 256
-#define VHOST_SCSI_MAX_VQ 128
+#define VHOST_SCSI_MAX_IO_VQ 1024
#define VHOST_SCSI_MAX_EVENT 128
+static unsigned vhost_scsi_max_io_vqs = 128;
+module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
+MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
+
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq;
+ struct vhost_scsi *vs;
/*
* Reference counting for inflight reqs, used for flush operation. At
* each time, one reference tracks new commands submitted, while we
@@ -181,42 +227,74 @@ struct vhost_scsi_virtqueue {
* Writers must also take dev mutex and flush under it.
*/
int inflight_idx;
+ struct vhost_scsi_cmd *scsi_cmds;
+ struct sbitmap scsi_tags;
+ int max_cmds;
+ struct page **upages;
+
+ struct vhost_work completion_work;
+ struct llist_head completion_list;
};
struct vhost_scsi {
/* Protected by vhost_scsi->dev.mutex */
- struct tcm_vhost_tpg **vs_tpg;
+ struct vhost_scsi_tpg **vs_tpg;
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev;
- struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
-
- struct vhost_work vs_completion_work; /* cmd completion work item */
- struct llist_head vs_completion_list; /* cmd completion queue */
+ struct vhost_scsi_virtqueue *vqs;
+ struct vhost_scsi_inflight **old_inflight;
struct vhost_work vs_event_work; /* evt injection work item */
struct llist_head vs_event_list; /* evt injection queue */
bool vs_events_missed; /* any missed events, protected by vq->mutex */
int vs_events_nr; /* num of pending events, protected by vq->mutex */
+
+ unsigned int inline_sg_cnt;
};
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+struct vhost_scsi_tmf {
+ struct vhost_work vwork;
+ struct work_struct flush_work;
+ struct vhost_scsi *vhost;
+ struct vhost_scsi_virtqueue *svq;
-static struct workqueue_struct *tcm_vhost_workqueue;
+ struct se_cmd se_cmd;
+ u8 scsi_resp;
+ struct vhost_scsi_inflight *inflight;
+ struct iovec resp_iov;
+ int in_iovs;
+ int vq_desc;
-/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
-static DEFINE_MUTEX(tcm_vhost_mutex);
-static LIST_HEAD(tcm_vhost_list);
+ /*
+ * Dirty write descriptors of this command.
+ */
+ struct vhost_log *tmf_log;
+ unsigned int tmf_log_num;
+};
-static int iov_num_pages(struct iovec *iov)
-{
- return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
- ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
-}
+/*
+ * Context for processing request and control queue operations.
+ */
+struct vhost_scsi_ctx {
+ int head;
+ unsigned int out, in;
+ size_t req_size, rsp_size;
+ size_t out_size, in_size;
+ u8 *target, *lunp;
+ void *req;
+ struct iov_iter out_iter;
+};
+
+/*
+ * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
+ * configfs management operations.
+ */
+static DEFINE_MUTEX(vhost_scsi_mutex);
+static LIST_HEAD(vhost_scsi_list);
-static void tcm_vhost_done_inflight(struct kref *kref)
+static void vhost_scsi_done_inflight(struct kref *kref)
{
struct vhost_scsi_inflight *inflight;
@@ -224,24 +302,24 @@ static void tcm_vhost_done_inflight(struct kref *kref)
complete(&inflight->comp);
}
-static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
struct vhost_scsi_inflight *old_inflight[])
{
struct vhost_scsi_inflight *new_inflight;
struct vhost_virtqueue *vq;
int idx, i;
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
- /* store old infight */
+ /* store old inflight */
idx = vs->vqs[i].inflight_idx;
if (old_inflight)
old_inflight[i] = &vs->vqs[i].inflights[idx];
- /* setup new infight */
+ /* setup new inflight */
vs->vqs[i].inflight_idx = idx ^ 1;
new_inflight = &vs->vqs[i].inflights[idx ^ 1];
kref_init(&new_inflight->kref);
@@ -252,7 +330,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
}
static struct vhost_scsi_inflight *
-tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
struct vhost_scsi_inflight *inflight;
struct vhost_scsi_virtqueue *svq;
@@ -264,289 +342,207 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
return inflight;
}
-static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
- kref_put(&inflight->kref, tcm_vhost_done_inflight);
+ kref_put(&inflight->kref, vhost_scsi_done_inflight);
}
-static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
return 1;
}
-static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-static char *tcm_vhost_get_fabric_name(void)
-{
- return "vhost";
-}
-
-static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
- struct tcm_vhost_tport *tport = tpg->tport;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_FCP:
- return fc_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_fabric_proto_ident(se_tpg);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
- }
-
- return sas_get_fabric_proto_ident(se_tpg);
-}
-
-static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
- struct tcm_vhost_tport *tport = tpg->tport;
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tport *tport = tpg->tport;
return &tport->tport_name[0];
}
-static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
return tpg->tport_tpgt;
}
-static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32
-tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
+static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
- struct tcm_vhost_tport *tport = tpg->tport;
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
+ return tpg->tv_fabric_prot_type;
}
-static u32
-tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
+static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq,
+ struct vhost_scsi_cmd *cmd,
+ struct vhost_log *log,
+ unsigned int log_num)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
- struct tcm_vhost_tport *tport = tpg->tport;
+ if (!cmd->tvc_log)
+ cmd->tvc_log = kmalloc_array(vq->dev->iov_limit,
+ sizeof(*cmd->tvc_log),
+ GFP_KERNEL);
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
+ if (unlikely(!cmd->tvc_log)) {
+ vq_err(vq, "Failed to alloc tvc_log\n");
+ return -ENOMEM;
}
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
+ memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num);
+ cmd->tvc_log_num = log_num;
+
+ return 0;
}
-static char *
-tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
+static void vhost_scsi_log_write(struct vhost_virtqueue *vq,
+ struct vhost_log *log,
+ unsigned int log_num)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
- struct tcm_vhost_tport *tport = tpg->tport;
+ if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
+ return;
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_FCP:
- return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- default:
- pr_err("Unknown tport_proto_id: 0x%02x, using"
- " SAS emulation\n", tport->tport_proto_id);
- break;
- }
+ if (likely(!log_num || !log))
+ return;
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
+ /*
+ * vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM.
+ * No requirement for vq->iotlb case.
+ */
+ WARN_ON_ONCE(unlikely(vq->iotlb));
+ vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
}
-static struct se_node_acl *
-tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
- struct tcm_vhost_nacl *nacl;
+ struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+ struct vhost_scsi_cmd, tvc_se_cmd);
+ struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi *vs = svq->vs;
+ struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
+ struct scatterlist *sg;
+ struct page *page;
+ int i;
- nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
- if (!nacl) {
- pr_err("Unable to allocate struct tcm_vhost_nacl\n");
- return NULL;
+ if (tv_cmd->tvc_sgl_count) {
+ for_each_sgtable_sg(&tv_cmd->table, sg, i) {
+ page = sg_page(sg);
+ if (!page)
+ continue;
+
+ if (tv_cmd->copied_iov)
+ __free_page(page);
+ else
+ put_page(page);
+ }
+ kfree(tv_cmd->read_iter);
+ kfree(tv_cmd->read_iov);
+ sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
+ }
+ if (tv_cmd->tvc_prot_sgl_count) {
+ for_each_sgtable_sg(&tv_cmd->prot_table, sg, i) {
+ page = sg_page(sg);
+ if (page)
+ put_page(page);
+ }
+ sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt);
}
- return &nacl->se_node_acl;
+ if (tv_cmd->tvc_resp_iovs != &tv_cmd->tvc_resp_iov)
+ kfree(tv_cmd->tvc_resp_iovs);
+ sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
+ vhost_scsi_put_inflight(inflight);
}
-static void
-tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
+static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
- struct tcm_vhost_nacl *nacl = container_of(se_nacl,
- struct tcm_vhost_nacl, se_node_acl);
- kfree(nacl);
-}
+ struct vhost_scsi_inflight *inflight = tmf->inflight;
-static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
- return 1;
+ /*
+ * tmf->tmf_log is NULL by default unless VHOST_F_LOG_ALL is set.
+ */
+ kfree(tmf->tmf_log);
+ kfree(tmf);
+ vhost_scsi_put_inflight(inflight);
}
-static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq)
{
- struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
- struct tcm_vhost_cmd, tvc_se_cmd);
-
- if (tv_cmd->tvc_sgl_count) {
- u32 i;
- for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
- put_page(sg_page(&tv_cmd->tvc_sgl[i]));
-
- kfree(tv_cmd->tvc_sgl);
- }
+ struct vhost_scsi_cmd *cmd, *t;
+ struct llist_node *llnode;
- tcm_vhost_put_inflight(tv_cmd->inflight);
- kfree(tv_cmd);
+ llnode = llist_del_all(&svq->completion_list);
+ llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list)
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
}
-static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
- return 0;
-}
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd,
+ struct vhost_scsi_tmf, se_cmd);
-static void tcm_vhost_close_session(struct se_session *se_sess)
-{
- return;
-}
-
-static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
-{
- return 0;
+ schedule_work(&tmf->flush_work);
+ } else {
+ struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+ struct vhost_scsi_cmd, tvc_se_cmd);
+ struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
+
+ llist_add(&cmd->tvc_completion_list, &svq->completion_list);
+ if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work))
+ vhost_scsi_drop_cmds(svq);
+ }
}
-static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
/* Go ahead and process the write immediately */
target_execute_cmd(se_cmd);
return 0;
}
-static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
-static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
-{
- return;
-}
-
-static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
+ transport_generic_free_cmd(se_cmd, 0);
return 0;
}
-static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
+ transport_generic_free_cmd(se_cmd, 0);
return 0;
}
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
- struct vhost_scsi *vs = cmd->tvc_vhost;
-
- llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
-
- vhost_work_queue(&vs->dev, &vs->vs_completion_work);
-}
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
+ se_cmd);
-static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
-{
- struct tcm_vhost_cmd *cmd = container_of(se_cmd,
- struct tcm_vhost_cmd, tvc_se_cmd);
- vhost_scsi_complete_cmd(cmd);
- return 0;
+ tmf->scsi_resp = se_cmd->se_tmr_req->response;
+ transport_generic_free_cmd(&tmf->se_cmd, 0);
}
-static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
-{
- struct tcm_vhost_cmd *cmd = container_of(se_cmd,
- struct tcm_vhost_cmd, tvc_se_cmd);
- vhost_scsi_complete_cmd(cmd);
- return 0;
-}
-
-static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
return;
}
-static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
vs->vs_events_nr--;
kfree(evt);
}
-static struct tcm_vhost_evt *
-tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+static struct vhost_scsi_evt *
+vhost_scsi_allocate_evt(struct vhost_scsi *vs,
u32 event, u32 reason)
{
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
- struct tcm_vhost_evt *evt;
+ struct vhost_scsi_evt *evt;
if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
vs->vs_events_missed = true;
@@ -555,51 +551,54 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt) {
- vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+ vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
vs->vs_events_missed = true;
return NULL;
}
- evt->event.event = event;
- evt->event.reason = reason;
+ evt->event.event = cpu_to_vhost32(vq, event);
+ evt->event.reason = cpu_to_vhost32(vq, reason);
vs->vs_events_nr++;
return evt;
}
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
-{
- struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-
- /* TODO locking against target/backend threads? */
- transport_generic_free_cmd(se_cmd, 0);
-
-}
-
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ return target_put_sess_cmd(se_cmd);
}
static void
-tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct virtio_scsi_event *event = &evt->event;
struct virtio_scsi_event __user *eventp;
+ struct vhost_log *vq_log;
+ unsigned int log_num;
unsigned out, in;
int head, ret;
- if (!vq->private_data) {
+ if (!vhost_vq_get_backend(vq)) {
vs->vs_events_missed = true;
return;
}
again:
vhost_disable_notify(&vs->dev, vq);
- head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+
+ /*
+ * Reset 'log_num' since vhost_get_vq_desc() may reset it only
+ * after certain condition checks.
+ */
+ log_num = 0;
+
+ head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
- NULL, NULL);
+ vq_log, &log_num);
if (head < 0) {
vs->vs_events_missed = true;
return;
@@ -619,7 +618,7 @@ again:
}
if (vs->vs_events_missed) {
- event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+ event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
vs->vs_events_missed = false;
}
@@ -628,28 +627,59 @@ again:
if (!ret)
vhost_add_used_and_signal(&vs->dev, vq, head, 0);
else
- vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+ vq_err(vq, "Faulted on vhost_scsi_send_event\n");
+
+ vhost_scsi_log_write(vq, vq_log, log_num);
}
-static void tcm_vhost_evt_work(struct vhost_work *work)
+static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
{
- struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
- vs_event_work);
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
- struct tcm_vhost_evt *evt;
+ struct vhost_scsi_evt *evt, *t;
struct llist_node *llnode;
mutex_lock(&vq->mutex);
llnode = llist_del_all(&vs->vs_event_list);
- while (llnode) {
- evt = llist_entry(llnode, struct tcm_vhost_evt, list);
- llnode = llist_next(llnode);
- tcm_vhost_do_evt_work(vs, evt);
- tcm_vhost_free_evt(vs, evt);
+ llist_for_each_entry_safe(evt, t, llnode, list) {
+ if (!drop)
+ vhost_scsi_do_evt_work(vs, evt);
+ vhost_scsi_free_evt(vs, evt);
}
mutex_unlock(&vq->mutex);
}
+static void vhost_scsi_evt_work(struct vhost_work *work)
+{
+ struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+ vs_event_work);
+ vhost_scsi_complete_events(vs, false);
+}
+
+static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
+{
+ struct iov_iter *iter = cmd->read_iter;
+ struct scatterlist *sg;
+ struct page *page;
+ size_t len;
+ int i;
+
+ for_each_sgtable_sg(&cmd->table, sg, i) {
+ page = sg_page(sg);
+ if (!page)
+ continue;
+
+ len = sg->length;
+
+ if (copy_page_to_iter(page, 0, len, iter) != len) {
+ pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
+ len);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/* Fill in status and signal that we are done processing this command
*
* This is scheduled in the vhost work queue so we are called with the owner
@@ -657,431 +687,1126 @@ static void tcm_vhost_evt_work(struct vhost_work *work)
*/
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
- struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
- vs_completion_work);
- DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
+ struct vhost_scsi_virtqueue *svq = container_of(work,
+ struct vhost_scsi_virtqueue, completion_work);
struct virtio_scsi_cmd_resp v_rsp;
- struct tcm_vhost_cmd *cmd;
+ struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
struct se_cmd *se_cmd;
- int ret, vq;
-
- bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
- llnode = llist_del_all(&vs->vs_completion_list);
- while (llnode) {
- cmd = llist_entry(llnode, struct tcm_vhost_cmd,
- tvc_completion_list);
- llnode = llist_next(llnode);
+ struct iov_iter iov_iter;
+ bool signal = false;
+ int ret;
+
+ llnode = llist_del_all(&svq->completion_list);
+
+ mutex_lock(&svq->vq.mutex);
+
+ llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
cmd, se_cmd->residual_count, se_cmd->scsi_status);
-
memset(&v_rsp, 0, sizeof(v_rsp));
- v_rsp.resid = se_cmd->residual_count;
- /* TODO is status_qualifier field needed? */
- v_rsp.status = se_cmd->scsi_status;
- v_rsp.sense_len = se_cmd->scsi_sense_length;
- memcpy(v_rsp.sense, cmd->tvc_sense_buf,
- v_rsp.sense_len);
- ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
- if (likely(ret == 0)) {
- struct vhost_scsi_virtqueue *q;
+
+ if (cmd->read_iter && vhost_scsi_copy_sgl_to_iov(cmd)) {
+ v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
+ se_cmd->residual_count);
+ /* TODO is status_qualifier field needed? */
+ v_rsp.status = se_cmd->scsi_status;
+ v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
+ se_cmd->scsi_sense_length);
+ memcpy(v_rsp.sense, cmd->tvc_sense_buf,
+ se_cmd->scsi_sense_length);
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iovs,
+ cmd->tvc_resp_iovs_cnt, sizeof(v_rsp));
+ ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+ if (likely(ret == sizeof(v_rsp))) {
+ signal = true;
+
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
- q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
- vq = q - vs->vqs;
- __set_bit(vq, signal);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
- vhost_scsi_free_cmd(cmd);
+ vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log,
+ cmd->tvc_log_num);
+
+ vhost_scsi_release_cmd_res(se_cmd);
}
- vq = -1;
- while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
- < VHOST_SCSI_MAX_VQ)
- vhost_signal(&vs->dev, &vs->vqs[vq].vq);
+ mutex_unlock(&svq->vq.mutex);
+
+ if (signal)
+ vhost_signal(&svq->vs->dev, &svq->vq);
}
-static struct tcm_vhost_cmd *
-vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
- struct tcm_vhost_tpg *tpg,
- struct virtio_scsi_cmd_req *v_req,
- u32 exp_data_len,
- int data_direction)
+static struct vhost_scsi_cmd *
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
{
- struct tcm_vhost_cmd *cmd;
- struct tcm_vhost_nexus *tv_nexus;
-
- tv_nexus = tpg->tpg_nexus;
- if (!tv_nexus) {
- pr_err("Unable to locate active struct tcm_vhost_nexus\n");
- return ERR_PTR(-EIO);
- }
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *cmd;
+ struct scatterlist *sgl, *prot_sgl;
+ struct vhost_log *log;
+ int tag;
- cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
- if (!cmd) {
- pr_err("Unable to allocate struct tcm_vhost_cmd\n");
+ tag = sbitmap_get(&svq->scsi_tags);
+ if (tag < 0) {
+ pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
return ERR_PTR(-ENOMEM);
}
- cmd->tvc_tag = v_req->tag;
- cmd->tvc_task_attr = v_req->task_attr;
- cmd->tvc_exp_data_len = exp_data_len;
- cmd->tvc_data_direction = data_direction;
- cmd->tvc_nexus = tv_nexus;
- cmd->inflight = tcm_vhost_get_inflight(vq);
+
+ cmd = &svq->scsi_cmds[tag];
+ sgl = cmd->sgl;
+ prot_sgl = cmd->prot_sgl;
+ log = cmd->tvc_log;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sgl = sgl;
+ cmd->prot_sgl = prot_sgl;
+ cmd->tvc_log = log;
+ cmd->tvc_se_cmd.map_tag = tag;
+ cmd->inflight = vhost_scsi_get_inflight(vq);
return cmd;
}
+static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter,
+ struct scatterlist *curr,
+ struct scatterlist *end)
+{
+ size_t revert_bytes = 0;
+ struct page *page;
+
+ while (curr != end) {
+ page = sg_page(curr);
+
+ if (page) {
+ put_page(page);
+ revert_bytes += curr->length;
+ }
+ /* Clear so we can re-use it for the copy path */
+ sg_set_page(curr, NULL, 0, 0);
+ curr = sg_next(curr);
+ }
+ iov_iter_revert(iter, revert_bytes);
+}
+
/*
* Map a user memory range into a scatterlist
*
* Returns the number of scatterlist entries used or -errno on error.
*/
static int
-vhost_scsi_map_to_sgl(struct scatterlist *sgl,
- unsigned int sgl_count,
- struct iovec *iov,
- int write)
-{
- unsigned int npages = 0, pages_nr, offset, nbytes;
- struct scatterlist *sg = sgl;
- void __user *ptr = iov->iov_base;
- size_t len = iov->iov_len;
- struct page **pages;
- int ret, i;
-
- pages_nr = iov_num_pages(iov);
- if (pages_nr > sgl_count)
- return -ENOBUFS;
-
- pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
+ struct iov_iter *iter,
+ struct sg_table *sg_table,
+ struct scatterlist **sgl,
+ bool is_prot)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct page **pages = svq->upages;
+ struct scatterlist *sg = *sgl;
+ ssize_t bytes;
+ size_t offset;
+ unsigned int n, npages = 0;
+
+ bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
+ VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
- if (ret < 0)
- goto out;
- /* Less pages pinned than wanted */
- if (ret != pages_nr) {
- for (i = 0; i < ret; i++)
- put_page(pages[i]);
- ret = -EFAULT;
- goto out;
+ if (bytes <= 0)
+ return bytes < 0 ? bytes : -EFAULT;
+
+ while (bytes) {
+ n = min_t(unsigned int, PAGE_SIZE - offset, bytes);
+ /*
+ * The block layer requires bios/requests to be a multiple of
+ * 512 bytes, but Windows can send us vecs that are misaligned.
+ * This can result in bios and later requests with misaligned
+ * sizes if we have to break up a cmd/scatterlist into multiple
+ * bios.
+ *
+ * We currently only break up a command into multiple bios if
+ * we hit the vec/seg limit, so check if our sgl_count is
+ * greater than the max and if a vec in the cmd has a
+ * misaligned offset/size.
+ */
+ if (!is_prot &&
+ (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
+ cmd->tvc_sgl_count > BIO_MAX_VECS) {
+ WARN_ONCE(true,
+ "vhost-scsi detected misaligned IO. Performance may be degraded.");
+ goto revert_iter_get_pages;
+ }
+
+ sg_set_page(sg, pages[npages++], n, offset);
+ sg = sg_next(sg);
+ bytes -= n;
+ offset = 0;
+ }
+
+ *sgl = sg;
+ return npages;
+
+revert_iter_get_pages:
+ vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg);
+
+ iov_iter_revert(iter, bytes);
+ while (bytes) {
+ n = min_t(unsigned int, PAGE_SIZE, bytes);
+
+ put_page(pages[npages++]);
+ bytes -= n;
+ }
+
+ return -EINVAL;
+}
+
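The splitting done by the loop above is easier to see in isolation: starting from a (possibly non-zero) offset into the first pinned page, each iteration emits one page-bounded segment, and only the first segment can begin mid-page. A standalone sketch of just that arithmetic, with kernel types replaced by plain C stand-ins:

/* Standalone model of the page-splitting loop in vhost_scsi_map_to_sgl(). */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t bytes = 10000, offset = 300;	/* example pinned byte range */
	unsigned int seg = 0;

	while (bytes) {
		size_t n = PAGE_SIZE - offset < bytes ? PAGE_SIZE - offset : bytes;

		/* One scatterlist entry per pinned page. */
		printf("sg[%u]: offset=%zu len=%zu\n", seg++, offset, n);
		bytes -= n;
		offset = 0;	/* only the first page can start mid-page */
	}
	return 0;	/* prints segments of 3796, 4096 and 2108 bytes */
}
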
+static int
+vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
+{
+ int sgl_count = 0;
+
+ if (!iter || !iter_iov(iter)) {
+ pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
+ " present\n", __func__, bytes);
+ return -EINVAL;
}
- while (len > 0) {
- offset = (uintptr_t)ptr & ~PAGE_MASK;
- nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
- sg_set_page(sg, pages[npages], nbytes, offset);
- ptr += nbytes;
+ sgl_count = iov_iter_npages(iter, 0xffff);
+ if (sgl_count > max_sgls) {
+ pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
+ " max_sgls: %d\n", __func__, sgl_count, max_sgls);
+ return -EINVAL;
+ }
+ return sgl_count;
+}
+
+static int
+vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ struct sg_table *sg_table, int sg_count,
+ int data_dir)
+{
+ size_t len = iov_iter_count(iter);
+ unsigned int nbytes = 0;
+ struct scatterlist *sg;
+ struct page *page;
+ int i, ret;
+
+ if (data_dir == DMA_FROM_DEVICE) {
+ cmd->read_iter = kzalloc(sizeof(*cmd->read_iter), GFP_KERNEL);
+ if (!cmd->read_iter)
+ return -ENOMEM;
+
+ cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL);
+ if (!cmd->read_iov) {
+ ret = -ENOMEM;
+ goto free_iter;
+ }
+ }
+
+ for_each_sgtable_sg(sg_table, sg, i) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nbytes = min_t(unsigned int, PAGE_SIZE, len);
+ sg_set_page(sg, page, nbytes, 0);
+
+ if (data_dir == DMA_TO_DEVICE &&
+ copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
+ ret = -EFAULT;
+ goto err;
+ }
+
len -= nbytes;
- sg++;
- npages++;
}
-out:
- kfree(pages);
+ cmd->copied_iov = 1;
+ return 0;
+
+err:
+ pr_err("Could not read %u bytes while handling misaligned cmd\n",
+ nbytes);
+
+ for_each_sgtable_sg(sg_table, sg, i) {
+ page = sg_page(sg);
+ if (page)
+ __free_page(page);
+ }
+ kfree(cmd->read_iov);
+free_iter:
+ kfree(cmd->read_iter);
return ret;
}
static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
- struct iovec *iov,
- unsigned int niov,
- int write)
+vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ struct sg_table *sg_table, int sg_count, bool is_prot)
{
+ struct scatterlist *sg = sg_table->sgl;
int ret;
- unsigned int i;
- u32 sgl_count;
- struct scatterlist *sg;
- /*
- * Find out how long sglist needs to be
- */
- sgl_count = 0;
- for (i = 0; i < niov; i++)
- sgl_count += iov_num_pages(&iov[i]);
+ while (iov_iter_count(iter)) {
+ ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot);
+ if (ret < 0) {
+ vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl,
+ sg);
+ return ret;
+ }
+ }
- /* TODO overflow checking */
+ return 0;
+}
- sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
- if (!sg)
- return -ENOMEM;
- pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
- sg, sgl_count, !sg);
- sg_init_table(sg, sgl_count);
+static int
+vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd,
+ size_t prot_bytes, struct iov_iter *prot_iter,
+ size_t data_bytes, struct iov_iter *data_iter, int data_dir)
+{
+ int sgl_count, ret;
+
+ if (prot_bytes) {
+ sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
+ VHOST_SCSI_PREALLOC_PROT_SGLS);
+ cmd->prot_table.sgl = cmd->prot_sgl;
+ ret = sg_alloc_table_chained(&cmd->prot_table, sgl_count,
+ cmd->prot_table.sgl,
+ vs->inline_sg_cnt);
+ if (ret)
+ return ret;
- cmd->tvc_sgl = sg;
- cmd->tvc_sgl_count = sgl_count;
+ cmd->tvc_prot_sgl_count = sgl_count;
+ pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+ cmd->prot_table.sgl, cmd->tvc_prot_sgl_count);
- pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
- for (i = 0; i < niov; i++) {
- ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
+ ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
+ &cmd->prot_table,
+ cmd->tvc_prot_sgl_count, true);
if (ret < 0) {
- for (i = 0; i < cmd->tvc_sgl_count; i++)
- put_page(sg_page(&cmd->tvc_sgl[i]));
- kfree(cmd->tvc_sgl);
- cmd->tvc_sgl = NULL;
- cmd->tvc_sgl_count = 0;
+ sg_free_table_chained(&cmd->prot_table,
+ vs->inline_sg_cnt);
+ cmd->tvc_prot_sgl_count = 0;
return ret;
}
+ }
+ sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
+ VHOST_SCSI_PREALLOC_SGLS);
+ if (sgl_count < 0)
+ return sgl_count;
+
+ cmd->table.sgl = cmd->sgl;
+ ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl,
+ vs->inline_sg_cnt);
+ if (ret)
+ return ret;
- sg += ret;
- sgl_count -= ret;
+ cmd->tvc_sgl_count = sgl_count;
+ pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
+ cmd->table.sgl, cmd->tvc_sgl_count);
+
+ ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table,
+ cmd->tvc_sgl_count, false);
+ if (ret == -EINVAL)
+ ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
+ cmd->tvc_sgl_count, data_dir);
+ if (ret < 0) {
+ sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
+ cmd->tvc_sgl_count = 0;
+ return ret;
}
return 0;
}
-static void tcm_vhost_submission_work(struct work_struct *work)
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+ switch (attr) {
+ case VIRTIO_SCSI_S_SIMPLE:
+ return TCM_SIMPLE_TAG;
+ case VIRTIO_SCSI_S_ORDERED:
+ return TCM_ORDERED_TAG;
+ case VIRTIO_SCSI_S_HEAD:
+ return TCM_HEAD_TAG;
+ case VIRTIO_SCSI_S_ACA:
+ return TCM_ACA_TAG;
+ default:
+ break;
+ }
+ return TCM_SIMPLE_TAG;
+}
+
+static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
+ struct vhost_scsi_cmd *cmd,
+ unsigned char *cdb, u16 lun,
+ int task_attr, int data_dir,
+ u32 exp_data_len)
{
- struct tcm_vhost_cmd *cmd =
- container_of(work, struct tcm_vhost_cmd, work);
- struct tcm_vhost_nexus *tv_nexus;
struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
- struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
- int rc, sg_no_bidi = 0;
+ struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
+ /* FIXME: BIDI operation */
if (cmd->tvc_sgl_count) {
- sg_ptr = cmd->tvc_sgl;
-/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
-#if 0
- if (se_cmd->se_cmd_flags & SCF_BIDI) {
- sg_bidi_ptr = NULL;
- sg_no_bidi = 0;
- }
-#endif
+ sg_ptr = cmd->table.sgl;
+
+ if (cmd->tvc_prot_sgl_count)
+ sg_prot_ptr = cmd->prot_table.sgl;
+ else
+ se_cmd->prot_pto = true;
} else {
sg_ptr = NULL;
}
- tv_nexus = cmd->tvc_nexus;
- rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
- cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
- cmd->tvc_lun, cmd->tvc_exp_data_len,
- cmd->tvc_task_attr, cmd->tvc_data_direction,
- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- sg_bidi_ptr, sg_no_bidi);
- if (rc < 0) {
- transport_send_check_condition_and_sense(se_cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0);
- }
+ se_cmd->tag = 0;
+ target_init_cmd(se_cmd, nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
+ lun, exp_data_len, vhost_scsi_to_tcm_attr(task_attr),
+ data_dir, TARGET_SCF_ACK_KREF);
+
+ if (target_submit_prep(se_cmd, cdb, sg_ptr,
+ cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+ cmd->tvc_prot_sgl_count, GFP_KERNEL))
+ return;
+
+ target_submit(se_cmd);
}
static void
-vhost_scsi_send_bad_target(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq,
- int head, unsigned out)
+vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc, u8 status)
{
- struct virtio_scsi_cmd_resp __user *resp;
struct virtio_scsi_cmd_resp rsp;
+ struct iov_iter iov_iter;
int ret;
memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+ rsp.status = status;
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
+#define TYPE_IO_CMD 0
+#define TYPE_CTRL_TMF 1
+#define TYPE_CTRL_AN 2
+
+static void
+vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc, int type)
+{
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ } rsp;
+ struct iov_iter iov_iter;
+ size_t rsp_size;
+ int ret;
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ if (type == TYPE_IO_CMD) {
+ rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+ rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else if (type == TYPE_CTRL_TMF) {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ rsp_size);
+
+ ret = copy_to_iter(&rsp, rsp_size, &iov_iter);
+
+ if (likely(ret == rsp_size))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ else
+ pr_err("Faulted on virtio scsi type=%d\n", type);
+}
+
+static int
+vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ int ret = -ENXIO;
+
+ if (likely(log_num))
+ *log_num = 0;
+
+ vc->head = vhost_get_vq_desc(vq, vq->iov,
+ ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
+ log, log_num);
+
+ pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+ vc->head, vc->out, vc->in);
+
+ /* On error, stop handling until the next kick. */
+ if (unlikely(vc->head < 0))
+ goto done;
+
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (vc->head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+ vhost_disable_notify(&vs->dev, vq);
+ ret = -EAGAIN;
+ }
+ goto done;
+ }
+
+ /*
+ * Get the size of request and response buffers.
+ * FIXME: Not correct for BIDI operation
+ */
+ vc->out_size = iov_length(vq->iov, vc->out);
+ vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
+
+ /*
+ * Copy over the virtio-scsi request header, which for a
+ * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+ * single iovec may contain both the header + outgoing
+ * WRITE payloads.
+ *
+ * copy_from_iter() will advance out_iter, so that it will
+ * point at the start of the outgoing WRITE payload, if
+ * DMA_TO_DEVICE is set.
+ */
+ iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
+ ret = 0;
+
+done:
+ return ret;
+}
+
+static int
+vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
+{
+ if (unlikely(vc->in_size < vc->rsp_size)) {
+ vq_err(vq,
+ "Response buf too small, need min %zu bytes got %zu",
+ vc->rsp_size, vc->in_size);
+ return -EINVAL;
+ } else if (unlikely(vc->out_size < vc->req_size)) {
+ vq_err(vq,
+ "Request buf too small, need min %zu bytes got %zu",
+ vc->req_size, vc->out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+ struct vhost_scsi_tpg **tpgp)
+{
+ int ret = -EIO;
+
+ if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
+ &vc->out_iter))) {
+ vq_err(vq, "Faulted on copy_from_iter_full\n");
+ } else if (unlikely(*vc->lunp != 1)) {
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+ } else {
+ struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;
+
+ if (vc->target) {
+ /* validated at handler entry */
+ vs_tpg = vhost_vq_get_backend(vq);
+ tpg = READ_ONCE(vs_tpg[*vc->target]);
+ if (unlikely(!tpg))
+ goto out;
+ }
+
+ if (tpgp)
+ *tpgp = tpg;
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static int
+vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs,
+ unsigned int in_iovs_cnt)
+{
+ int i, cnt;
+
+ if (!in_iovs_cnt)
+ return 0;
+ /*
+ * Initiators normally just put the virtio_scsi_cmd_resp in the first
+ * iov, but just in case they wedged in some data with it we check for
+ * greater than or equal to the response struct.
+ */
+ if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) {
+ cmd->tvc_resp_iovs = &cmd->tvc_resp_iov;
+ cmd->tvc_resp_iovs_cnt = 1;
+ } else {
+ /*
+ * Legacy descriptor layouts didn't specify that we must put
+ * the entire response in one iov. Worst case we have an
+ * iov per byte.
+ */
+ cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt);
+ cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec),
+ GFP_KERNEL);
+ if (!cmd->tvc_resp_iovs)
+ return -ENOMEM;
+
+ cmd->tvc_resp_iovs_cnt = cnt;
+ }
+
+ for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++)
+ cmd->tvc_resp_iovs[i] = in_iovs[i];
+
+ return 0;
+}
+
+static u16 vhost_buf_to_lun(u8 *lun_buf)
+{
+ return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
+}
+
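vhost_buf_to_lun() undoes the virtio-scsi single-level LUN encoding, in which byte 0 is 1, byte 1 selects the target, and bytes 2-3 carry 0x4000 ORed with a 14-bit LUN. A self-contained sketch with one worked value (the encoding in main() is illustrative, not taken from the patch):

/* Illustrative encode/decode of a virtio-scsi single-level LUN. */
#include <stdio.h>
#include <stdint.h>

static uint16_t vhost_buf_to_lun(const uint8_t *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}

int main(void)
{
	uint16_t lun = 300;
	/* byte 0 must be 1, byte 1 is the target, bytes 2-3 are 0x4000 | lun */
	uint8_t buf[8] = { 1, 0, 0x40 | (lun >> 8), lun & 0xff, 0, 0, 0, 0 };

	printf("decoded lun: %u\n", vhost_buf_to_lun(buf));	/* prints 300 */
	return 0;
}
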
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
- struct tcm_vhost_tpg **vs_tpg;
+ struct vhost_scsi_tpg **vs_tpg, *tpg;
struct virtio_scsi_cmd_req v_req;
- struct tcm_vhost_tpg *tpg;
- struct tcm_vhost_cmd *cmd;
- u32 exp_data_len, data_first, data_num, data_direction;
- unsigned out, in, i;
- int head, ret;
- u8 target;
+ struct virtio_scsi_cmd_req_pi v_req_pi;
+ struct vhost_scsi_nexus *nexus;
+ struct vhost_scsi_ctx vc;
+ struct vhost_scsi_cmd *cmd;
+ struct iov_iter in_iter, prot_iter, data_iter;
+ u64 tag;
+ u32 exp_data_len, data_direction;
+ int ret, prot_bytes, c = 0;
+ u16 lun;
+ u8 task_attr;
+ bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
+ u8 *cdb;
+ struct vhost_log *vq_log;
+ unsigned int log_num;
+ mutex_lock(&vq->mutex);
/*
* We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
- *
- * TODO: Check that we are running from vhost_worker which acts
- * as read-side critical section for vhost kind of RCU.
- * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
*/
- vs_tpg = rcu_dereference_check(vq->private_data, 1);
+ vs_tpg = vhost_vq_get_backend(vq);
if (!vs_tpg)
- return;
+ goto out;
+
+ memset(&vc, 0, sizeof(vc));
+ vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
- mutex_lock(&vq->mutex);
vhost_disable_notify(&vs->dev, vq);
- for (;;) {
- head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
- ARRAY_SIZE(vq->iov), &out, &in,
- NULL, NULL);
- pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
- head, out, in);
- /* On error, stop handling until the next kick. */
- if (unlikely(head < 0))
- break;
- /* Nothing new? Wait for eventfd to tell us they refilled. */
- if (head == vq->num) {
- if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
- vhost_disable_notify(&vs->dev, vq);
- continue;
- }
- break;
- }
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
-/* FIXME: BIDI operation */
- if (out == 1 && in == 1) {
- data_direction = DMA_NONE;
- data_first = 0;
- data_num = 0;
- } else if (out == 1 && in > 1) {
- data_direction = DMA_FROM_DEVICE;
- data_first = out + 1;
- data_num = in - 1;
- } else if (out > 1 && in == 1) {
- data_direction = DMA_TO_DEVICE;
- data_first = 1;
- data_num = out - 1;
+ do {
+ ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
+ if (ret)
+ goto err;
+
+ /*
+ * Setup pointers and values based upon different virtio-scsi
+ * request header if T10_PI is enabled in KVM guest.
+ */
+ if (t10_pi) {
+ vc.req = &v_req_pi;
+ vc.req_size = sizeof(v_req_pi);
+ vc.lunp = &v_req_pi.lun[0];
+ vc.target = &v_req_pi.lun[1];
} else {
- vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
- out, in);
- break;
+ vc.req = &v_req;
+ vc.req_size = sizeof(v_req);
+ vc.lunp = &v_req.lun[0];
+ vc.target = &v_req.lun[1];
}
/*
- * Check for a sane resp buffer so we can report errors to
- * the guest.
+ * Validate the size of request and response buffers.
+ * Check for a sane response buffer so we can report
+ * early errors back to the guest.
*/
- if (unlikely(vq->iov[out].iov_len !=
- sizeof(struct virtio_scsi_cmd_resp))) {
- vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
- " bytes\n", vq->iov[out].iov_len);
- break;
- }
+ ret = vhost_scsi_chk_size(vq, &vc);
+ if (ret)
+ goto err;
- if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
- vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
- " bytes\n", vq->iov[0].iov_len);
- break;
- }
- pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
- " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
- ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
- sizeof(v_req));
- if (unlikely(ret)) {
- vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
- break;
- }
+ ret = vhost_scsi_get_req(vq, &vc, &tpg);
+ if (ret)
+ goto err;
- /* Extract the tpgt */
- target = v_req.lun[1];
- tpg = ACCESS_ONCE(vs_tpg[target]);
+ ret = -EIO; /* bad target on any error from here on */
- /* Target does not exist, fail the request */
- if (unlikely(!tpg)) {
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
- }
+ /*
+ * Determine data_direction by calculating the total outgoing
+ * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
+ * response headers respectively.
+ *
+ * For DMA_TO_DEVICE this is out_iter, which is already pointing
+ * to the right place.
+ *
+ * For DMA_FROM_DEVICE, the iovec will be just past the end
+ * of the virtio-scsi response header in either the same
+ * or immediately following iovec.
+ *
+ * Any associated T10_PI bytes for the outgoing / incoming
+ * payloads are included in calculation of exp_data_len here.
+ */
+ prot_bytes = 0;
- exp_data_len = 0;
- for (i = 0; i < data_num; i++)
- exp_data_len += vq->iov[data_first + i].iov_len;
+ if (vc.out_size > vc.req_size) {
+ data_direction = DMA_TO_DEVICE;
+ exp_data_len = vc.out_size - vc.req_size;
+ data_iter = vc.out_iter;
+ } else if (vc.in_size > vc.rsp_size) {
+ data_direction = DMA_FROM_DEVICE;
+ exp_data_len = vc.in_size - vc.rsp_size;
- cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
- exp_data_len, data_direction);
- if (IS_ERR(cmd)) {
- vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
- PTR_ERR(cmd));
- goto err_cmd;
+ iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
+ vc.rsp_size + exp_data_len);
+ iov_iter_advance(&in_iter, vc.rsp_size);
+ data_iter = in_iter;
+ } else {
+ data_direction = DMA_NONE;
+ exp_data_len = 0;
}
- pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
- ": %d\n", cmd, exp_data_len, data_direction);
-
- cmd->tvc_vhost = vs;
- cmd->tvc_vq = vq;
- cmd->tvc_resp = vq->iov[out].iov_base;
-
/*
- * Copy in the recieved CDB descriptor into cmd->tvc_cdb
- * that will be used by tcm_vhost_new_cmd_map() and down into
- * target_setup_cmd_from_cdb()
+ * If T10_PI header + payload is present, setup prot_iter values
+ * and recalculate data_iter for vhost_scsi_mapal() mapping to
+ * host scatterlists via get_user_pages_fast().
*/
- memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
+ if (t10_pi) {
+ if (v_req_pi.pi_bytesout) {
+ if (data_direction != DMA_TO_DEVICE) {
+ vq_err(vq, "Received non zero pi_bytesout,"
+ " but wrong data_direction\n");
+ goto err;
+ }
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
+ } else if (v_req_pi.pi_bytesin) {
+ if (data_direction != DMA_FROM_DEVICE) {
+ vq_err(vq, "Received non zero pi_bytesin,"
+ " but wrong data_direction\n");
+ goto err;
+ }
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
+ }
+ /*
+ * Set prot_iter to data_iter and truncate it to
+ * prot_bytes, and advance data_iter past any
+ * preceding prot_bytes that may be present.
+ *
+ * Also fix up the exp_data_len to reflect only the
+ * actual data payload length.
+ */
+ if (prot_bytes) {
+ exp_data_len -= prot_bytes;
+ prot_iter = data_iter;
+ iov_iter_truncate(&prot_iter, prot_bytes);
+ iov_iter_advance(&data_iter, prot_bytes);
+ }
+ tag = vhost64_to_cpu(vq, v_req_pi.tag);
+ task_attr = v_req_pi.task_attr;
+ cdb = &v_req_pi.cdb[0];
+ lun = vhost_buf_to_lun(v_req_pi.lun);
+ } else {
+ tag = vhost64_to_cpu(vq, v_req.tag);
+ task_attr = v_req.task_attr;
+ cdb = &v_req.cdb[0];
+ lun = vhost_buf_to_lun(v_req.lun);
+ }
/*
- * Check that the recieved CDB size does not exceeded our
- * hardcoded max for tcm_vhost
+ * Check that the received CDB size does not exceed our
+ * hardcoded max for vhost-scsi, then get a pre-allocated
+ * cmd descriptor for the new virtio-scsi tag.
+ *
+ * TODO what if cdb was too small for varlen cdb header?
*/
- /* TODO what if cdb was too small for varlen cdb header? */
- if (unlikely(scsi_command_size(cmd->tvc_cdb) >
- TCM_VHOST_MAX_CDB_SIZE)) {
+ if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
vq_err(vq, "Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
- scsi_command_size(cmd->tvc_cdb),
- TCM_VHOST_MAX_CDB_SIZE);
- goto err_free;
+ scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
+ goto err;
+ }
+
+ nexus = tpg->tpg_nexus;
+ if (!nexus) {
+ vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ cmd = vhost_scsi_get_cmd(vq, tag);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ vq_err(vq, "vhost_scsi_get_tag failed %d\n", ret);
+ goto err;
+ }
+ cmd->tvc_vq = vq;
+
+ ret = vhost_scsi_setup_resp_iovs(cmd, &vq->iov[vc.out], vc.in);
+ if (ret) {
+ vq_err(vq, "Failed to alloc recv iovs\n");
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ goto err;
+ }
+
+ if (unlikely(vq_log && log_num)) {
+ ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num);
+ if (unlikely(ret)) {
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ goto err;
+ }
}
- cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
- cmd->tvc_cdb[0], cmd->tvc_lun);
+ cdb[0], lun);
+ pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
+ " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
if (data_direction != DMA_NONE) {
- ret = vhost_scsi_map_iov_to_sgl(cmd,
- &vq->iov[data_first], data_num,
- data_direction == DMA_TO_DEVICE);
+ ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter,
+ exp_data_len, &data_iter,
+ data_direction);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
- goto err_free;
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ goto err;
}
}
-
/*
* Save the descriptor from vhost_get_vq_desc() to be used to
* complete the virtio-scsi request in TCM callback context via
- * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+ * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
*/
- cmd->tvc_vq_desc = head;
+ cmd->tvc_vq_desc = vc.head;
+ vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr,
+ data_direction,
+ exp_data_len + prot_bytes);
+ ret = 0;
+err:
/*
- * Dispatch tv_cmd descriptor for cmwq execution in process
- * context provided by tcm_vhost_workqueue. This also ensures
- * tv_cmd is executed on the same kworker CPU as this vhost
- * thread to gain positive L2 cache locality effects..
+ * ENXIO: No more requests, or read error, wait for next kick
+ * EINVAL: Invalid response buffer, drop the request
+ * EIO: Respond with bad target
+ * EAGAIN: Pending request
+ * ENOMEM: Could not allocate resources for request
*/
- INIT_WORK(&cmd->work, tcm_vhost_submission_work);
- queue_work(tcm_vhost_workqueue, &cmd->work);
+ if (ret == -ENXIO)
+ break;
+ else if (ret == -EIO) {
+ vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ } else if (ret == -ENOMEM) {
+ vhost_scsi_send_status(vs, vq, &vc,
+ SAM_STAT_TASK_SET_FULL);
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+out:
+ mutex_unlock(&vq->mutex);
+}
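The size-based direction inference in the handler above replaces the deleted iovec-count heuristic: any out bytes beyond the request header are WRITE payload, any in bytes beyond the response header are READ payload, and equality on both sides means DMA_NONE. A worked example, assuming the standard uapi struct sizes (51-byte virtio_scsi_cmd_req with a 32-byte CDB, 108-byte virtio_scsi_cmd_resp with a 96-byte sense area):

	/* 512-byte guest WRITE, no T10 PI:
	 *   out_size = 51 + 512 = 563 > req_size = 51
	 *     -> DMA_TO_DEVICE, exp_data_len = 563 - 51 = 512
	 * 512-byte guest READ:
	 *   in_size = 108 + 512 = 620 > rsp_size = 108
	 *     -> DMA_FROM_DEVICE, exp_data_len = 620 - 108 = 512,
	 *        with in_iter advanced past the 108-byte response. */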
+
+static void
+vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ int in_iovs, int vq_desc, struct iovec *resp_iov,
+ int tmf_resp_code)
+{
+ struct virtio_scsi_ctrl_tmf_resp rsp;
+ struct iov_iter iov_iter;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = tmf_resp_code;
+
+ iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
+ else
+ pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
+}
+
+static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
+{
+ struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
+ vwork);
+ int resp_code;
+
+ if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
+ resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ else
+ resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+
+ mutex_lock(&tmf->svq->vq.mutex);
+ vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+ tmf->vq_desc, &tmf->resp_iov, resp_code);
+ vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log,
+ tmf->tmf_log_num);
+ mutex_unlock(&tmf->svq->vq.mutex);
+
+ vhost_scsi_release_tmf_res(tmf);
+}
+
+static void vhost_scsi_tmf_flush_work(struct work_struct *work)
+{
+ struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
+ flush_work);
+ struct vhost_virtqueue *vq = &tmf->svq->vq;
+ /*
+ * Make sure we have sent responses for other commands before we
+ * send our response.
+ */
+ vhost_dev_flush(vq->dev);
+ if (!vhost_vq_work_queue(vq, &tmf->vwork))
+ vhost_scsi_release_tmf_res(tmf);
+}
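vhost_vq_work_queue() fails once the vq's worker has been released (late in teardown), so every producer of deferred work must be ready to take the release path inline; the TMF path above and vhost_scsi_send_evt() below both follow this rule. The shape of the pattern, with release_obj() standing in for the object's real cleanup:

	if (!vhost_vq_work_queue(vq, &obj->vwork))
		release_obj(obj);	/* worker gone: clean up inline */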
+
+static void
+vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
+ struct vhost_virtqueue *vq,
+ struct virtio_scsi_ctrl_tmf_req *vtmf,
+ struct vhost_scsi_ctx *vc,
+ struct vhost_log *log, unsigned int log_num)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_tmf *tmf;
+
+ if (vhost32_to_cpu(vq, vtmf->subtype) !=
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
+ goto send_reject;
+
+ if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
+ pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
+ goto send_reject;
+ }
+
+ tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+ if (!tmf)
+ goto send_reject;
+
+ INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work);
+ vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
+ tmf->vhost = vs;
+ tmf->svq = svq;
+ tmf->resp_iov = vq->iov[vc->out];
+ tmf->vq_desc = vc->head;
+ tmf->in_iovs = vc->in;
+ tmf->inflight = vhost_scsi_get_inflight(vq);
+
+ if (unlikely(log && log_num)) {
+ tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log),
+ GFP_KERNEL);
+ if (tmf->tmf_log) {
+ memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num);
+ tmf->tmf_log_num = log_num;
+ } else {
+ pr_err("vhost_scsi tmf log allocation error\n");
+ vhost_scsi_release_tmf_res(tmf);
+ goto send_reject;
+ }
+ }
+
+ if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
+ vhost_buf_to_lun(vtmf->lun), NULL,
+ TMR_LUN_RESET, GFP_KERNEL, 0,
+ TARGET_SCF_ACK_KREF) < 0) {
+ vhost_scsi_release_tmf_res(tmf);
+ goto send_reject;
}
- mutex_unlock(&vq->mutex);
return;
-err_free:
- vhost_scsi_free_cmd(cmd);
-err_cmd:
- vhost_scsi_send_bad_target(vs, vq, head, out);
+send_reject:
+ vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
+ VIRTIO_SCSI_S_FUNCTION_REJECTED);
+ vhost_scsi_log_write(vq, log, log_num);
+}
+
+static void
+vhost_scsi_send_an_resp(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc)
+{
+ struct virtio_scsi_ctrl_an_resp rsp;
+ struct iov_iter iov_iter;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+ memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
+ rsp.response = VIRTIO_SCSI_S_OK;
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
+}
+
+static void
+vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_tpg *tpg;
+ union {
+ __virtio32 type;
+ struct virtio_scsi_ctrl_an_req an;
+ struct virtio_scsi_ctrl_tmf_req tmf;
+ } v_req;
+ struct vhost_scsi_ctx vc;
+ size_t typ_size;
+ int ret, c = 0;
+ struct vhost_log *vq_log;
+ unsigned int log_num;
+
+ mutex_lock(&vq->mutex);
+ /*
+ * We can handle the vq only after the endpoint is setup by calling the
+ * VHOST_SCSI_SET_ENDPOINT ioctl.
+ */
+ if (!vhost_vq_get_backend(vq))
+ goto out;
+
+ memset(&vc, 0, sizeof(vc));
+
+ vhost_disable_notify(&vs->dev, vq);
+
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+
+ do {
+ ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
+ if (ret)
+ goto err;
+
+ /*
+ * Get the request type first in order to setup
+ * other parameters dependent on the type.
+ */
+ vc.req = &v_req.type;
+ typ_size = sizeof(v_req.type);
+
+ if (unlikely(!copy_from_iter_full(vc.req, typ_size,
+ &vc.out_iter))) {
+ vq_err(vq, "Faulted on copy_from_iter tmf type\n");
+ /*
+ * The size of the response buffer depends on the
+ * request type and must be validated against it.
+ * Since the request type is not known, don't send
+ * a response.
+ */
+ continue;
+ }
+
+ switch (vhost32_to_cpu(vq, v_req.type)) {
+ case VIRTIO_SCSI_T_TMF:
+ vc.req = &v_req.tmf;
+ vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+ vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ vc.lunp = &v_req.tmf.lun[0];
+ vc.target = &v_req.tmf.lun[1];
+ break;
+ case VIRTIO_SCSI_T_AN_QUERY:
+ case VIRTIO_SCSI_T_AN_SUBSCRIBE:
+ vc.req = &v_req.an;
+ vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
+ vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ vc.lunp = &v_req.an.lun[0];
+ vc.target = NULL;
+ break;
+ default:
+ vq_err(vq, "Unknown control request %d", v_req.type);
+ continue;
+ }
+
+ /*
+ * Validate the size of request and response buffers.
+ * Check for a sane response buffer so we can report
+ * early errors back to the guest.
+ */
+ ret = vhost_scsi_chk_size(vq, &vc);
+ if (ret)
+ goto err;
+
+ /*
+ * Get the rest of the request now that its size is known.
+ */
+ vc.req += typ_size;
+ vc.req_size -= typ_size;
+
+ ret = vhost_scsi_get_req(vq, &vc, &tpg);
+ if (ret)
+ goto err;
+
+ if (v_req.type == VIRTIO_SCSI_T_TMF)
+ vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
+ vq_log, log_num);
+ else {
+ vhost_scsi_send_an_resp(vs, vq, &vc);
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ }
+err:
+ /*
+ * ENXIO: No more requests, or read error, wait for next kick
+ * EINVAL: Invalid response buffer, drop the request
+ * EIO: Respond with bad target
+ * EAGAIN: Pending request
+ */
+ if (ret == -ENXIO)
+ break;
+ else if (ret == -EIO) {
+ vhost_scsi_send_bad_target(vs, vq, &vc,
+ v_req.type == VIRTIO_SCSI_T_TMF ?
+ TYPE_CTRL_TMF :
+ TYPE_CTRL_AN);
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+out:
mutex_unlock(&vq->mutex);
}
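The control queue parses its header in two steps because the full request size depends on the leading type word: copy sizeof(__virtio32) first, select the per-type request/response sizes, then bump vc.req and shrink vc.req_size so that vhost_scsi_get_req() copies only the remainder from the already-advanced out_iter. Worked through for a TMF, assuming the standard 24-byte virtio_scsi_ctrl_tmf_req layout:

	/* step 1: copy 4 bytes            -> v_req.type
	 * step 2: vc.req      += 4
	 *         vc.req_size  = 24 - 4   (rest of the TMF header)
	 * out_iter already advanced past the type word in step 1, so the
	 * two copies land contiguously from the guest's out iovecs. */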
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
pr_debug("%s: The handling func for control queue.\n", __func__);
+ vhost_scsi_ctl_handle_vq(vs, vq);
}
static void
-tcm_vhost_send_evt(struct vhost_scsi *vs,
- struct tcm_vhost_tpg *tpg,
- struct se_lun *lun,
- u32 event,
- u32 reason)
+vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_tpg *tpg, struct se_lun *lun,
+ u32 event, u32 reason)
{
- struct tcm_vhost_evt *evt;
+ struct vhost_scsi_evt *evt;
- evt = tcm_vhost_allocate_evt(vs, event, reason);
+ evt = vhost_scsi_allocate_evt(vs, event, reason);
if (!evt)
return;
@@ -1092,14 +1817,15 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
* lun[4-7] need to be zero according to virtio-scsi spec.
*/
evt->event.lun[0] = 0x01;
- evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+ evt->event.lun[1] = tpg->tport_tpgt;
if (lun->unpacked_lun >= 256)
evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
evt->event.lun[3] = lun->unpacked_lun & 0xFF;
}
llist_add(&evt->list, &vs->vs_event_list);
- vhost_work_queue(&vs->dev, &vs->vs_event_work);
+ if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
+ vhost_scsi_complete_events(vs, true);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
@@ -1109,11 +1835,12 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
if (vs->vs_events_missed)
- tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+ vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
+ 0);
out:
mutex_unlock(&vq->mutex);
}
@@ -1127,58 +1854,148 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
vhost_scsi_handle_vq(vs, vq);
}
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
- vhost_poll_flush(&vs->vqs[index].vq.poll);
-}
-
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
- struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
int i;
/* Init new inflight and remember the old inflight */
- tcm_vhost_init_inflight(vs, old_inflight);
+ vhost_scsi_init_inflight(vs, vs->old_inflight);
/*
* The inflight->kref was initialized to 1. We decrement it here to
* indicate the start of the flush operation so that it will reach 0
* when all the reqs are finished.
*/
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- vhost_scsi_flush_vq(vs, i);
- vhost_work_flush(&vs->dev, &vs->vs_completion_work);
- vhost_work_flush(&vs->dev, &vs->vs_event_work);
+ vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- wait_for_completion(&old_inflight[i]->comp);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ wait_for_completion(&vs->old_inflight[i]->comp);
+}
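The flush drains in-flight commands with a kref-plus-completion idiom: every command holds a reference on its virtqueue's inflight counter, the counter starts life with one extra reference, and dropping that extra reference here makes the completion fire exactly when the last pre-flush command completes. The release callback is defined earlier in the file; it presumably reduces to:

	/* Presumed shape of vhost_scsi_done_inflight(), named in the loop
	 * above: fires when the final inflight reference is dropped. */
	static void vhost_scsi_done_inflight(struct kref *kref)
	{
		struct vhost_scsi_inflight *inflight =
			container_of(kref, struct vhost_scsi_inflight, kref);

		complete(&inflight->comp);
	}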
+
+static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (!svq->scsi_cmds)
+ return;
+
+ for (i = 0; i < svq->max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+ kfree(tv_cmd->tvc_log);
+ tv_cmd->tvc_log = NULL;
+ tv_cmd->tvc_log_num = 0;
+ }
+}
+
+static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (!svq->scsi_cmds)
+ return;
+
+ for (i = 0; i < svq->max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ kfree(tv_cmd->sgl);
+ kfree(tv_cmd->prot_sgl);
+ }
+
+ sbitmap_free(&svq->scsi_tags);
+ kfree(svq->upages);
+ vhost_scsi_destroy_vq_log(vq);
+ kfree(svq->scsi_cmds);
+ svq->scsi_cmds = NULL;
+}
+
+static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi *vs = svq->vs;
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (svq->scsi_cmds)
+ return 0;
+
+ if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
+ NUMA_NO_NODE, false, true))
+ return -ENOMEM;
+ svq->max_cmds = max_cmds;
+
+ svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
+ if (!svq->scsi_cmds) {
+ sbitmap_free(&svq->scsi_tags);
+ return -ENOMEM;
+ }
+
+ svq->upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!svq->upages)
+ goto out;
+
+ for (i = 0; i < max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ if (vs->inline_sg_cnt) {
+ tv_cmd->sgl = kcalloc(vs->inline_sg_cnt,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->sgl) {
+ pr_err("Unable to allocate tv_cmd->sgl\n");
+ goto out;
+ }
+ }
+
+ if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) &&
+ vs->inline_sg_cnt) {
+ tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->prot_sgl) {
+ pr_err("Unable to allocate tv_cmd->prot_sgl\n");
+ goto out;
+ }
+ }
+ }
+ return 0;
+out:
+ vhost_scsi_destroy_vq_cmds(vq);
+ return -ENOMEM;
}
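Commands are pre-allocated per virtqueue at endpoint-setup time and handed out by tag, so the I/O path itself never allocates. A hedged sketch of how vhost_scsi_get_cmd() plausibly pairs with the pool built here (the sbitmap API has changed signatures across kernel versions; the one-argument sbitmap_get() is assumed):

	int tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0)
		return ERR_PTR(-ENOMEM);	/* pool exhausted */
	cmd = &svq->scsi_cmds[tag];
	/* ... on release: */
	sbitmap_clear_bit(&svq->scsi_tags, tag);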
/*
* Called from vhost_scsi_ioctl() context to walk the list of available
- * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ * vhost_scsi_tpg with an active struct vhost_scsi_nexus
*
* The lock nesting rule is:
- * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ * vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
*/
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t)
{
- struct tcm_vhost_tport *tv_tport;
- struct tcm_vhost_tpg *tpg;
- struct tcm_vhost_tpg **vs_tpg;
+ struct se_portal_group *se_tpg;
+ struct vhost_scsi_tport *tv_tport;
+ struct vhost_scsi_tpg *tpg;
+ struct vhost_scsi_tpg **vs_tpg;
struct vhost_virtqueue *vq;
int index, ret, i, len;
bool match = false;
- mutex_lock(&tcm_vhost_mutex);
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -1190,16 +2007,22 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
}
}
+ if (vs->vs_tpg) {
+ pr_err("vhost-scsi endpoint already set for %s.\n",
+ vs->vs_vhost_wwpn);
+ ret = -EEXIST;
+ goto out;
+ }
+
len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
vs_tpg = kzalloc(len, GFP_KERNEL);
if (!vs_tpg) {
ret = -ENOMEM;
goto out;
}
- if (vs->vs_tpg)
- memcpy(vs_tpg, vs->vs_tpg, len);
- list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
+ mutex_lock(&vhost_scsi_mutex);
+ list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
mutex_lock(&tpg->tv_tpg_mutex);
if (!tpg->tpg_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1212,48 +2035,84 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
tv_tport = tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
- if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
- kfree(vs_tpg);
+ /*
+ * In order to ensure individual vhost-scsi configfs
+ * groups cannot be removed while in use by vhost ioctl,
+ * go ahead and take an explicit se_tpg->tpg_group.cg_item
+ * dependency now.
+ */
+ se_tpg = &tpg->se_tpg;
+ ret = target_depend_item(&se_tpg->tpg_group.cg_item);
+ if (ret) {
+ pr_warn("target_depend_item() failed: %d\n", ret);
mutex_unlock(&tpg->tv_tpg_mutex);
- ret = -EEXIST;
- goto out;
+ mutex_unlock(&vhost_scsi_mutex);
+ goto undepend;
}
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg;
- smp_mb__after_atomic_inc();
match = true;
}
mutex_unlock(&tpg->tv_tpg_mutex);
}
+ mutex_unlock(&vhost_scsi_mutex);
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ if (!vhost_vq_is_setup(vq))
+ continue;
+
+ ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
+ if (ret)
+ goto destroy_vq_cmds;
+ }
+
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
- /* Flushing the vhost_work acts as synchronize_rcu */
mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, vs_tpg);
- vhost_init_used(vq);
+ vhost_vq_set_backend(vq, vs_tpg);
+ vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
ret = 0;
} else {
- ret = -EEXIST;
+ ret = -ENODEV;
+ goto free_tpg;
}
/*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
+ * Act as synchronize_rcu to make sure requests after this point
+ * see a fully setup device.
*/
vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
vs->vs_tpg = vs_tpg;
+ goto out;
+destroy_vq_cmds:
+ for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
+ if (!vhost_vq_get_backend(&vs->vqs[i].vq))
+ vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
+ }
+undepend:
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ tpg = vs_tpg[i];
+ if (tpg) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->vhost_scsi = NULL;
+ tpg->tv_tpg_vhost_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+ }
+ }
+free_tpg:
+ kfree(vs_tpg);
out:
mutex_unlock(&vs->dev.mutex);
- mutex_unlock(&tcm_vhost_mutex);
return ret;
}
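For reference, the userspace half of this handshake is small. A minimal sketch using the vhost uapi (error handling and vring setup omitted; the WWPN is an example that must match a tport created through configfs):

	struct vhost_scsi_target t = { 0 };
	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);

	strncpy(t.vhost_wwpn, "naa.5001405123456789",
		sizeof(t.vhost_wwpn) - 1);
	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
	/* ... VHOST_SET_VRING_* calls ... */
	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);	/* -EEXIST if set twice */
	/* ... */
	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &t);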
@@ -1261,14 +2120,14 @@ static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t)
{
- struct tcm_vhost_tport *tv_tport;
- struct tcm_vhost_tpg *tpg;
+ struct se_portal_group *se_tpg;
+ struct vhost_scsi_tport *tv_tport;
+ struct vhost_scsi_tpg *tpg;
struct vhost_virtqueue *vq;
bool match = false;
int index, ret, i;
u8 target;
- mutex_lock(&tcm_vhost_mutex);
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1289,11 +2148,10 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
if (!tpg)
continue;
- mutex_lock(&tpg->tv_tpg_mutex);
tv_tport = tpg->tport;
if (!tv_tport) {
ret = -ENODEV;
- goto err_tpg;
+ goto err_dev;
}
if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
@@ -1302,23 +2160,51 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
tv_tport->tport_name, tpg->tport_tpgt,
t->vhost_wwpn, t->vhost_tpgt);
ret = -EINVAL;
- goto err_tpg;
+ goto err_dev;
}
+ match = true;
+ }
+ if (!match)
+ goto free_vs_tpg;
+
+ /* Prevent new cmds from starting and accessing the tpgs/sessions */
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+ }
+ /* Make sure cmds are not running before tearing them down. */
+ vhost_scsi_flush(vs);
+
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ vhost_scsi_destroy_vq_cmds(vq);
+ }
+
+ /*
+ * We can now release our hold on the tpg and sessions and userspace
+ * can free them after this point.
+ */
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ target = i;
+ tpg = vs->vs_tpg[target];
+ if (!tpg)
+ continue;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+
tpg->tv_tpg_vhost_count--;
tpg->vhost_scsi = NULL;
vs->vs_tpg[target] = NULL;
- match = true;
+
mutex_unlock(&tpg->tv_tpg_mutex);
+
+ se_tpg = &tpg->se_tpg;
+ target_undepend_item(&se_tpg->tpg_group.cg_item);
}
- if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i].vq;
- /* Flushing the vhost_work acts as synchronize_rcu */
- mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, NULL);
- mutex_unlock(&vq->mutex);
- }
- }
+
+free_vs_tpg:
/*
* Act as synchronize_rcu to make sure access to
* old vs->vs_tpg is finished.
@@ -1326,21 +2212,22 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
vhost_scsi_flush(vs);
kfree(vs->vs_tpg);
vs->vs_tpg = NULL;
+ memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
WARN_ON(vs->vs_events_nr);
mutex_unlock(&vs->dev.mutex);
- mutex_unlock(&tcm_vhost_mutex);
return 0;
-err_tpg:
- mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
mutex_unlock(&vs->dev.mutex);
- mutex_unlock(&tcm_vhost_mutex);
return ret;
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
+ struct vhost_virtqueue *vq;
+ bool is_log, was_log;
+ int i;
+
if (features & ~VHOST_SCSI_FEATURES)
return -EOPNOTSUPP;
@@ -1350,31 +2237,81 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
mutex_unlock(&vs->dev.mutex);
return -EFAULT;
}
- vs->dev.acked_features = features;
- smp_wmb();
- vhost_scsi_flush(vs);
+
+ if (!vs->dev.nvqs)
+ goto out;
+
+ is_log = features & (1 << VHOST_F_LOG_ALL);
+ /*
+ * All VQs should have the same features.
+ */
+ was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);
+
+ for (i = 0; i < vs->dev.nvqs; i++) {
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+
+ /*
+ * If VHOST_F_LOG_ALL is removed, free tvc_log after
+ * vq->acked_features is committed.
+ */
+ if (!is_log && was_log) {
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
+ if (!vs->vqs[i].scsi_cmds)
+ continue;
+
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vhost_scsi_destroy_vq_log(vq);
+ mutex_unlock(&vq->mutex);
+ }
+ }
+
+out:
mutex_unlock(&vs->dev.mutex);
return 0;
}
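The acked feature word is now stored per-vq under vq->mutex instead of device-wide behind a barrier; userspace still drives it through the generic vhost feature ioctls. A minimal sketch of that negotiation (VHOST_SCSI_FEATURES itself is a kernel-internal mask):

	__u64 features;

	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);	/* host's offer */
	features &= ~(1ULL << VHOST_F_LOG_ALL);		/* e.g. drop dirty logging */
	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);	/* -EOPNOTSUPP on unknown bits */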
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
+ struct vhost_scsi_virtqueue *svq;
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
- int r, i;
+ int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
- vs = kzalloc(sizeof(*vs), GFP_KERNEL);
+ vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
- return -ENOMEM;
-
- vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
- if (!vqs) {
- kfree(vs);
- return -ENOMEM;
+ goto err_vs;
+ vs->inline_sg_cnt = vhost_scsi_inline_sg_cnt;
+
+ if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
+ pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
+ VHOST_SCSI_MAX_IO_VQ);
+ nvqs = VHOST_SCSI_MAX_IO_VQ;
+ } else if (nvqs == 0) {
+ pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
+ nvqs = 1;
}
+ nvqs += VHOST_SCSI_VQ_IO;
+
+ vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->old_inflight)
+ goto err_inflight;
+
+ vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->vqs)
+ goto err_vqs;
- vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
- vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_local_vqs;
+
+ vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
vs->vs_events_nr = 0;
vs->vs_events_missed = false;
@@ -1383,22 +2320,32 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
- vqs[i] = &vs->vqs[i].vq;
- vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+ for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
+ svq = &vs->vqs[i];
+
+ vqs[i] = &svq->vq;
+ svq->vs = vs;
+ init_llist_head(&svq->completion_list);
+ vhost_work_init(&svq->completion_work,
+ vhost_scsi_complete_cmd_work);
+ svq->vq.handle_kick = vhost_scsi_handle_kick;
}
- r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
-
- tcm_vhost_init_inflight(vs, NULL);
+ vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
+ VHOST_SCSI_WEIGHT, 0, true, NULL);
- if (r < 0) {
- kfree(vqs);
- kfree(vs);
- return r;
- }
+ vhost_scsi_init_inflight(vs, NULL);
f->private_data = vs;
return 0;
+
+err_local_vqs:
+ kfree(vs->vqs);
+err_vqs:
+ kfree(vs->old_inflight);
+err_inflight:
+ kvfree(vs);
+err_vs:
+ return r;
}
static int vhost_scsi_release(struct inode *inode, struct file *f)
@@ -1411,11 +2358,11 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
mutex_unlock(&vs->dev.mutex);
vhost_scsi_clear_endpoint(vs, &t);
vhost_dev_stop(&vs->dev);
- vhost_dev_cleanup(&vs->dev, false);
- /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
- vhost_scsi_flush(vs);
+ vhost_dev_cleanup(&vs->dev);
kfree(vs->dev.vqs);
- kfree(vs);
+ kfree(vs->vqs);
+ kfree(vs->old_inflight);
+ kvfree(vs);
return 0;
}
@@ -1476,6 +2423,14 @@ vhost_scsi_ioctl(struct file *f,
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
return vhost_scsi_set_features(vs, features);
+ case VHOST_NEW_WORKER:
+ case VHOST_FREE_WORKER:
+ case VHOST_ATTACH_VRING_WORKER:
+ case VHOST_GET_VRING_WORKER:
+ mutex_lock(&vs->dev.mutex);
+ r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
+ mutex_unlock(&vs->dev.mutex);
+ return r;
default:
mutex_lock(&vs->dev.mutex);
r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
@@ -1487,21 +2442,11 @@ vhost_scsi_ioctl(struct file *f,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_scsi_fops = {
.owner = THIS_MODULE,
.release = vhost_scsi_release,
.unlocked_ioctl = vhost_scsi_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_scsi_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_scsi_open,
.llseek = noop_llseek,
};
@@ -1517,12 +2462,12 @@ static int __init vhost_scsi_register(void)
return misc_register(&vhost_scsi_misc);
}
-static int vhost_scsi_deregister(void)
+static void vhost_scsi_deregister(void)
{
- return misc_deregister(&vhost_scsi_misc);
+ misc_deregister(&vhost_scsi_misc);
}
-static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
switch (tport->tport_proto_id) {
case SCSI_PROTOCOL_SAS:
@@ -1539,7 +2484,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
}
static void
-tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
struct se_lun *lun, bool plug)
{
@@ -1550,12 +2495,6 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
if (!vs)
return;
- mutex_lock(&vs->dev.mutex);
- if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
- mutex_unlock(&vs->dev.mutex);
- return;
- }
-
if (plug)
reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
else
@@ -1563,107 +2502,99 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
mutex_lock(&vq->mutex);
- tcm_vhost_send_evt(vs, tpg, lun,
- VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+ /*
+ * We can't queue events if the backend has been cleared, because
+ * we could end up queueing an event after the flush.
+ */
+ if (!vhost_vq_get_backend(vq))
+ goto unlock;
+
+ if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
+ vhost_scsi_send_evt(vs, vq, tpg, lun,
+ VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+unlock:
mutex_unlock(&vq->mutex);
- mutex_unlock(&vs->dev.mutex);
}
-static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
- tcm_vhost_do_plug(tpg, lun, true);
+ vhost_scsi_do_plug(tpg, lun, true);
}
-static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
- tcm_vhost_do_plug(tpg, lun, false);
+ vhost_scsi_do_plug(tpg, lun, false);
}
-static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
+static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
struct se_lun *lun)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
-
- mutex_lock(&tcm_vhost_mutex);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count++;
+ vhost_scsi_hotplug(tpg, lun);
mutex_unlock(&tpg->tv_tpg_mutex);
- tcm_vhost_hotplug(tpg, lun);
-
- mutex_unlock(&tcm_vhost_mutex);
-
return 0;
}
-static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
+static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
struct se_lun *lun)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
-
- mutex_lock(&tcm_vhost_mutex);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count--;
+ vhost_scsi_hotunplug(tpg, lun);
mutex_unlock(&tpg->tv_tpg_mutex);
-
- tcm_vhost_hotunplug(tpg, lun);
-
- mutex_unlock(&tcm_vhost_mutex);
}
-static struct se_node_acl *
-tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
+static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
+ struct config_item *item, const char *page, size_t count)
{
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct tcm_vhost_nacl *nacl;
- u64 wwpn = 0;
- u32 nexus_depth;
-
- /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
- return ERR_PTR(-EINVAL); */
- se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
- if (!se_nacl_new)
- return ERR_PTR(-ENOMEM);
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
- nexus_depth = 1;
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explict
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, nexus_depth);
- if (IS_ERR(se_nacl)) {
- tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
- return se_nacl;
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
}
- /*
- * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
- */
- nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
- nacl->iport_wwpn = wwpn;
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tpg->tv_fabric_prot_type = val;
- return se_nacl;
+ return count;
}
-static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
+ struct config_item *item, char *page)
{
- struct tcm_vhost_nacl *nacl = container_of(se_acl,
- struct tcm_vhost_nacl, se_node_acl);
- core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
- kfree(nacl);
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
}
-static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
+CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
+
+static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
+ &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
+ NULL,
+};
+
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
- struct se_portal_group *se_tpg;
- struct tcm_vhost_nexus *tv_nexus;
+ struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex);
if (tpg->tpg_nexus) {
@@ -1671,54 +2602,36 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
pr_debug("tpg->tpg_nexus already exists\n");
return -EEXIST;
}
- se_tpg = &tpg->se_tpg;
- tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+ tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
- pr_err("Unable to allocate struct tcm_vhost_nexus\n");
- return -ENOMEM;
- }
- /*
- * Initialize the struct se_session pointer
- */
- tv_nexus->tvn_se_sess = transport_init_session();
- if (IS_ERR(tv_nexus->tvn_se_sess)) {
- mutex_unlock(&tpg->tv_tpg_mutex);
- kfree(tv_nexus);
+ pr_err("Unable to allocate struct vhost_scsi_nexus\n");
return -ENOMEM;
}
/*
- * Since we are running in 'demo mode' this call with generate a
- * struct se_node_acl for the tcm_vhost struct se_portal_group with
+ * Since we are running in 'demo mode' this call will generate a
+ * struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
- tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
- se_tpg, (unsigned char *)name);
- if (!tv_nexus->tvn_se_sess->se_node_acl) {
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+ (unsigned char *)name, tv_nexus, NULL);
+ if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
- pr_debug("core_tpg_check_initiator_node_acl() failed"
- " for %s\n", name);
- transport_free_session(tv_nexus->tvn_se_sess);
kfree(tv_nexus);
return -ENOMEM;
}
- /*
- * Now register the TCM vhost virtual I_T Nexus as active with the
- * call to __transport_register_session()
- */
- __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
- tv_nexus->tvn_se_sess, tv_nexus);
tpg->tpg_nexus = tv_nexus;
mutex_unlock(&tpg->tv_tpg_mutex);
return 0;
}
-static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
+static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
struct se_session *se_sess;
- struct tcm_vhost_nexus *tv_nexus;
+ struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex);
tv_nexus = tpg->tpg_nexus;
@@ -1750,12 +2663,13 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
}
pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
- " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+ " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1763,12 +2677,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
return 0;
}
-static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
- char *page)
+static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
- struct tcm_vhost_nexus *tv_nexus;
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_nexus *tv_nexus;
ssize_t ret;
mutex_lock(&tpg->tv_tpg_mutex);
@@ -1777,47 +2691,47 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
mutex_unlock(&tpg->tv_tpg_mutex);
return -ENODEV;
}
- ret = snprintf(page, PAGE_SIZE, "%s\n",
+ ret = sysfs_emit(page, "%s\n",
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
mutex_unlock(&tpg->tv_tpg_mutex);
return ret;
}
-static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
- struct tcm_vhost_tport *tport_wwn = tpg->tport;
- unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tport *tport_wwn = tpg->tport;
+ unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
int ret;
/*
* Shutdown the active I_T nexus if 'NULL' is passed..
*/
if (!strncmp(page, "NULL", 4)) {
- ret = tcm_vhost_drop_nexus(tpg);
+ ret = vhost_scsi_drop_nexus(tpg);
return (!ret) ? count : ret;
}
/*
* Otherwise make sure the passed virtual Initiator port WWN matches
- * the fabric protocol_id set in tcm_vhost_make_tport(), and call
- * tcm_vhost_make_nexus().
+ * the fabric protocol_id set in vhost_scsi_make_tport(), and call
+ * vhost_scsi_make_nexus().
*/
- if (strlen(page) >= TCM_VHOST_NAMELEN) {
+ if (strlen(page) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated NAA Sas Address: %s, exceeds"
- " max: %d\n", page, TCM_VHOST_NAMELEN);
+ " max: %d\n", page, VHOST_SCSI_NAMELEN);
return -EINVAL;
}
- snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+ snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
- tcm_vhost_dump_proto_id(tport_wwn));
+ vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL;
}
port_ptr = &i_port[0];
@@ -1828,7 +2742,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
- tcm_vhost_dump_proto_id(tport_wwn));
+ vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL;
}
port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -1839,7 +2753,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
- tcm_vhost_dump_proto_id(tport_wwn));
+ vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL;
}
port_ptr = &i_port[0];
@@ -1855,40 +2769,38 @@ check_newline:
if (i_port[strlen(i_port)-1] == '\n')
i_port[strlen(i_port)-1] = '\0';
- ret = tcm_vhost_make_nexus(tpg, port_ptr);
+ ret = vhost_scsi_make_nexus(tpg, port_ptr);
if (ret < 0)
return ret;
return count;
}
-TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
-static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
- &tcm_vhost_tpg_nexus.attr,
+static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
+ &vhost_scsi_tpg_attr_nexus,
NULL,
};
static struct se_portal_group *
-tcm_vhost_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
- struct tcm_vhost_tport *tport = container_of(wwn,
- struct tcm_vhost_tport, tport_wwn);
+ struct vhost_scsi_tport *tport = container_of(wwn,
+ struct vhost_scsi_tport, tport_wwn);
- struct tcm_vhost_tpg *tpg;
- unsigned long tpgt;
+ struct vhost_scsi_tpg *tpg;
+ u16 tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
- if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+ if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
return ERR_PTR(-EINVAL);
- tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg) {
- pr_err("Unable to allocate struct tcm_vhost_tpg");
+ pr_err("Unable to allocate struct vhost_scsi_tpg");
return ERR_PTR(-ENOMEM);
}
mutex_init(&tpg->tv_tpg_mutex);
@@ -1896,31 +2808,30 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
- &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
if (ret < 0) {
kfree(tpg);
return NULL;
}
- mutex_lock(&tcm_vhost_mutex);
- list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
- mutex_unlock(&tcm_vhost_mutex);
+ mutex_lock(&vhost_scsi_mutex);
+ list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
+ mutex_unlock(&vhost_scsi_mutex);
return &tpg->se_tpg;
}
-static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
- struct tcm_vhost_tpg *tpg = container_of(se_tpg,
- struct tcm_vhost_tpg, se_tpg);
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
- mutex_lock(&tcm_vhost_mutex);
+ mutex_lock(&vhost_scsi_mutex);
list_del(&tpg->tv_tpg_list);
- mutex_unlock(&tcm_vhost_mutex);
+ mutex_unlock(&vhost_scsi_mutex);
/*
* Release the virtual I_T Nexus for this vhost TPG
*/
- tcm_vhost_drop_nexus(tpg);
+ vhost_scsi_drop_nexus(tpg);
/*
* Deregister the se_tpg from TCM..
*/
@@ -1929,21 +2840,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
}
static struct se_wwn *
-tcm_vhost_make_tport(struct target_fabric_configfs *tf,
+vhost_scsi_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
- struct tcm_vhost_tport *tport;
+ struct vhost_scsi_tport *tport;
char *ptr;
u64 wwpn = 0;
int off = 0;
- /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+ /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL); */
- tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
if (!tport) {
- pr_err("Unable to allocate struct tcm_vhost_tport");
+ pr_err("Unable to allocate struct vhost_scsi_tport");
return ERR_PTR(-ENOMEM);
}
tport->tport_wwpn = wwpn;
@@ -1974,169 +2885,96 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf,
return ERR_PTR(-EINVAL);
check_len:
- if (strlen(name) >= TCM_VHOST_NAMELEN) {
+ if (strlen(name) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated %s Address: %s, exceeds"
- " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
- TCM_VHOST_NAMELEN);
+ " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
+ VHOST_SCSI_NAMELEN);
kfree(tport);
return ERR_PTR(-EINVAL);
}
- snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+ snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
- " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+ " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
return &tport->tport_wwn;
}
-static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
- struct tcm_vhost_tport *tport = container_of(wwn,
- struct tcm_vhost_tport, tport_wwn);
+ struct vhost_scsi_tport *tport = container_of(wwn,
+ struct vhost_scsi_tport, tport_wwn);
pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
- " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+ " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
tport->tport_name);
kfree(tport);
}
static ssize_t
-tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
- char *page)
+vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
- return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
- "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+ return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
+ " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
}
-TF_WWN_ATTR_RO(tcm_vhost, version);
+CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
-static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
- &tcm_vhost_wwn_version.attr,
+static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
+ &vhost_scsi_wwn_attr_version,
NULL,
};
-static struct target_core_fabric_ops tcm_vhost_ops = {
- .get_fabric_name = tcm_vhost_get_fabric_name,
- .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
- .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
- .tpg_get_tag = tcm_vhost_get_tag,
- .tpg_get_default_depth = tcm_vhost_get_default_depth,
- .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
- .tpg_check_demo_mode = tcm_vhost_check_true,
- .tpg_check_demo_mode_cache = tcm_vhost_check_true,
- .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
- .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
- .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
- .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
- .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
- .release_cmd = tcm_vhost_release_cmd,
+static const struct target_core_fabric_ops vhost_scsi_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "vhost",
+ .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
+ .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
+ .tpg_get_tag = vhost_scsi_get_tpgt,
+ .tpg_check_demo_mode = vhost_scsi_check_true,
+ .tpg_check_demo_mode_cache = vhost_scsi_check_true,
+ .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
+ .release_cmd = vhost_scsi_release_cmd,
.check_stop_free = vhost_scsi_check_stop_free,
- .shutdown_session = tcm_vhost_shutdown_session,
- .close_session = tcm_vhost_close_session,
- .sess_get_index = tcm_vhost_sess_get_index,
.sess_get_initiator_sid = NULL,
- .write_pending = tcm_vhost_write_pending,
- .write_pending_status = tcm_vhost_write_pending_status,
- .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
- .get_task_tag = tcm_vhost_get_task_tag,
- .get_cmd_state = tcm_vhost_get_cmd_state,
- .queue_data_in = tcm_vhost_queue_data_in,
- .queue_status = tcm_vhost_queue_status,
- .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
+ .write_pending = vhost_scsi_write_pending,
+ .queue_data_in = vhost_scsi_queue_data_in,
+ .queue_status = vhost_scsi_queue_status,
+ .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
+ .aborted_task = vhost_scsi_aborted_task,
/*
* Setup callers for generic logic in target_core_fabric_configfs.c
*/
- .fabric_make_wwn = tcm_vhost_make_tport,
- .fabric_drop_wwn = tcm_vhost_drop_tport,
- .fabric_make_tpg = tcm_vhost_make_tpg,
- .fabric_drop_tpg = tcm_vhost_drop_tpg,
- .fabric_post_link = tcm_vhost_port_link,
- .fabric_pre_unlink = tcm_vhost_port_unlink,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
- .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
+ .fabric_make_wwn = vhost_scsi_make_tport,
+ .fabric_drop_wwn = vhost_scsi_drop_tport,
+ .fabric_make_tpg = vhost_scsi_make_tpg,
+ .fabric_drop_tpg = vhost_scsi_drop_tpg,
+ .fabric_post_link = vhost_scsi_port_link,
+ .fabric_pre_unlink = vhost_scsi_port_unlink,
+
+ .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
+ .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
+ .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
+
+ .default_submit_type = TARGET_QUEUE_SUBMIT,
+ .direct_submit_supp = 1,
};
-static int tcm_vhost_register_configfs(void)
+static int __init vhost_scsi_init(void)
{
- struct target_fabric_configfs *fabric;
- int ret;
+ int ret = -ENOMEM;
pr_debug("TCM_VHOST fabric module %s on %s/%s"
- " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+ " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup fabric->tf_ops from our local tcm_vhost_ops
- */
- fabric->tf_ops = tcm_vhost_ops;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed"
- " for TCM_VHOST\n");
- return ret;
- }
- /*
- * Setup our local pointer to *fabric
- */
- tcm_vhost_fabric_configfs = fabric;
- pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
- return 0;
-};
-
-static void tcm_vhost_deregister_configfs(void)
-{
- if (!tcm_vhost_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
- tcm_vhost_fabric_configfs = NULL;
- pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
-};
-
-static int __init tcm_vhost_init(void)
-{
- int ret = -ENOMEM;
- /*
- * Use our own dedicated workqueue for submitting I/O into
- * target core to avoid contention within system_wq.
- */
- tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
- if (!tcm_vhost_workqueue)
- goto out;
ret = vhost_scsi_register();
if (ret < 0)
- goto out_destroy_workqueue;
+ goto out;
- ret = tcm_vhost_register_configfs();
+ ret = target_register_template(&vhost_scsi_ops);
if (ret < 0)
goto out_vhost_scsi_deregister;
@@ -2144,21 +2982,18 @@ static int __init tcm_vhost_init(void)
out_vhost_scsi_deregister:
vhost_scsi_deregister();
-out_destroy_workqueue:
- destroy_workqueue(tcm_vhost_workqueue);
out:
return ret;
-};
+}
-static void tcm_vhost_exit(void)
+static void vhost_scsi_exit(void)
{
- tcm_vhost_deregister_configfs();
+ target_unregister_template(&vhost_scsi_ops);
vhost_scsi_deregister();
- destroy_workqueue(tcm_vhost_workqueue);
-};
+}
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
-module_init(tcm_vhost_init);
-module_exit(tcm_vhost_exit);
+module_init(vhost_scsi_init);
+module_exit(vhost_scsi_exit);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a73ea217f24d..1e4e36edbcd2 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* test virtio server in host kernel.
*/
@@ -13,7 +12,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
-#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>
@@ -24,6 +22,18 @@
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small packets.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
+static const int vhost_test_bits[] = {
+ VHOST_FEATURES
+};
+
+#define VHOST_TEST_FEATURES VHOST_FEATURES_U64(vhost_test_bits, 0)
+
enum {
VHOST_TEST_VQ = 0,
VHOST_TEST_VQ_MAX = 1,
@@ -45,7 +55,7 @@ static void handle_vq(struct vhost_test *n)
void *private;
mutex_lock(&vq->mutex);
- private = vq->private_data;
+ private = vhost_vq_get_backend(vq);
if (!private) {
mutex_unlock(&vq->mutex);
return;
@@ -54,7 +64,7 @@ static void handle_vq(struct vhost_test *n)
vhost_disable_notify(&n->dev, vq);
for (;;) {
- head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
+ head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
NULL, NULL);
@@ -82,10 +92,8 @@ static void handle_vq(struct vhost_test *n)
}
vhost_add_used_and_signal(&n->dev, vq, head, 0);
total_len += len;
- if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
+ if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
break;
- }
}
mutex_unlock(&vq->mutex);
@@ -105,11 +113,10 @@ static int vhost_test_open(struct inode *inode, struct file *f)
struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r;
if (!n)
return -ENOMEM;
- vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+ vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
kfree(n);
return -ENOMEM;
@@ -118,12 +125,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
- if (r < 0) {
- kfree(vqs);
- kfree(n);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);
f->private_data = n;
@@ -136,8 +139,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
void *private;
mutex_lock(&vq->mutex);
- private = vq->private_data;
- vq->private_data = NULL;
+ private = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
return private;
}
@@ -147,14 +150,9 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}
-static void vhost_test_flush_vq(struct vhost_test *n, int index)
-{
- vhost_poll_flush(&n->vqs[index].poll);
-}
-
static void vhost_test_flush(struct vhost_test *n)
{
- vhost_test_flush_vq(n, VHOST_TEST_VQ);
+ vhost_dev_flush(&n->dev);
}
static int vhost_test_release(struct inode *inode, struct file *f)
@@ -164,10 +162,9 @@ static int vhost_test_release(struct inode *inode, struct file *f)
vhost_test_stop(n, &private);
vhost_test_flush(n);
- vhost_dev_cleanup(&n->dev, false);
- /* We do an extra flush before freeing memory,
- * since jobs can re-queue themselves. */
- vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
+ vhost_dev_cleanup(&n->dev);
+ kfree(n->dev.vqs);
kfree(n);
return 0;
}
@@ -200,11 +197,10 @@ static long vhost_test_run(struct vhost_test *n, int test)
priv = test ? n : NULL;
/* start polling new socket */
- oldpriv = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
- rcu_assign_pointer(vq->private_data, priv);
+ oldpriv = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, priv);
- r = vhost_init_used(&n->vqs[index]);
+ r = vhost_vq_init_access(&n->vqs[index]);
mutex_unlock(&vq->mutex);
@@ -212,7 +208,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
goto err;
if (oldpriv) {
- vhost_test_flush_vq(n, index);
+ vhost_test_flush(n);
}
}
@@ -228,20 +224,21 @@ static long vhost_test_reset_owner(struct vhost_test *n)
{
void *priv = NULL;
long err;
- struct vhost_memory *memory;
+ struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
- memory = vhost_dev_reset_owner_prepare();
- if (!memory) {
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_test_stop(n, &priv);
vhost_test_flush(n);
- vhost_dev_reset_owner(&n->dev, memory);
+ vhost_dev_stop(&n->dev);
+ vhost_dev_reset_owner(&n->dev, umem);
done:
mutex_unlock(&n->dev.mutex);
return err;
@@ -249,22 +246,78 @@ done:
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
+ struct vhost_virtqueue *vq;
+
mutex_lock(&n->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&n->dev)) {
mutex_unlock(&n->dev.mutex);
return -EFAULT;
}
- n->dev.acked_features = features;
- smp_wmb();
- vhost_test_flush(n);
+ vq = &n->vqs[VHOST_TEST_VQ];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
mutex_unlock(&n->dev.mutex);
return 0;
}
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+ static void *backend;
+
+ const bool enable = fd != -1;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_TEST_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = &n->vqs[index];
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ if (!enable) {
+ vhost_poll_stop(&vq->poll);
+ backend = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
+ } else {
+ vhost_vq_set_backend(vq, backend);
+ r = vhost_vq_init_access(vq);
+ if (r == 0)
+ r = vhost_poll_start(&vq->poll, vq->kick);
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ if (enable) {
+ vhost_test_flush(n);
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
+ struct vhost_vring_file backend;
struct vhost_test *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
@@ -276,15 +329,19 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&test, argp, sizeof test))
return -EFAULT;
return vhost_test_run(n, test);
+ case VHOST_TEST_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_test_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
- features = VHOST_FEATURES;
+ features = VHOST_TEST_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
- if (features & ~VHOST_FEATURES)
+ if (features & ~VHOST_TEST_FEATURES)
return -EOPNOTSUPP;
return vhost_test_set_features(n, features);
case VHOST_RESET_OWNER:
@@ -300,21 +357,11 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_test_fops = {
.owner = THIS_MODULE,
.release = vhost_test_release,
.unlocked_ioctl = vhost_test_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_test_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_test_open,
.llseek = noop_llseek,
};
@@ -324,18 +371,7 @@ static struct miscdevice vhost_test_misc = {
"vhost-test",
&vhost_test_fops,
};
-
-static int vhost_test_init(void)
-{
- return misc_register(&vhost_test_misc);
-}
-module_init(vhost_test_init);
-
-static void vhost_test_exit(void)
-{
- misc_deregister(&vhost_test_misc);
-}
-module_exit(vhost_test_exit);
+module_misc_device(vhost_test_misc);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h
index 1fef5df82153..822bc4bee03a 100644
--- a/drivers/vhost/test.h
+++ b/drivers/vhost/test.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_VHOST_TEST_H
#define LINUX_VHOST_TEST_H
/* Start a given test on the virtio null device. 0 stops all tests. */
#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
#endif
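
Although declared above with an int payload, the new VHOST_TEST_SET_BACKEND ioctl is driven with a struct vhost_vring_file: fd == -1 detaches the backend, any other value attaches it (the handler only tests the fd against -1). A minimal userspace sketch, assuming /dev/vhost-test exists and eliding the usual VHOST_SET_FEATURES/VHOST_SET_VRING_* ring setup and most error handling:

/* Hedged sketch only: attach and then detach the vhost-test backend. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include "test.h"	/* VHOST_TEST_SET_BACKEND */

int main(void)
{
	struct vhost_vring_file backend = { .index = 0, .fd = 0 };
	int fd = open("/dev/vhost-test", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VHOST_SET_OWNER, NULL))	/* become the owner first */
		return 1;
	/* ... VHOST_SET_FEATURES and VHOST_SET_VRING_* setup elided ... */
	if (ioctl(fd, VHOST_TEST_SET_BACKEND, &backend))	/* fd != -1: attach */
		return 1;
	backend.fd = -1;					/* fd == -1: detach */
	return ioctl(fd, VHOST_TEST_SET_BACKEND, &backend) ? 1 : 0;
}
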
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
new file mode 100644
index 000000000000..05a481e4c385
--- /dev/null
+++ b/drivers/vhost/vdpa.c
@@ -0,0 +1,1681 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Intel Corporation.
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Author: Tiwei Bie <tiwei.bie@intel.com>
+ * Jason Wang <jasowang@redhat.com>
+ *
+ * Thanks to Michael S. Tsirkin for the valuable comments and
+ * suggestions, and to Cunming Liang and Zhihong Wang for all
+ * their support.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/uuid.h>
+#include <linux/vdpa.h>
+#include <linux/nospec.h>
+#include <linux/vhost.h>
+
+#include "vhost.h"
+
+enum {
+ VHOST_VDPA_BACKEND_FEATURES =
+ (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
+};
+
+#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
+
+#define VHOST_VDPA_IOTLB_BUCKETS 16
+
+struct vhost_vdpa_as {
+ struct hlist_node hash_link;
+ struct vhost_iotlb iotlb;
+ u32 id;
+};
+
+struct vhost_vdpa {
+ struct vhost_dev vdev;
+ struct iommu_domain *domain;
+ struct vhost_virtqueue *vqs;
+ struct completion completion;
+ struct vdpa_device *vdpa;
+ struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
+ struct device dev;
+ struct cdev cdev;
+ atomic_t opened;
+ u32 nvqs;
+ int virtio_id;
+ int minor;
+ struct eventfd_ctx *config_ctx;
+ int in_batch;
+ struct vdpa_iova_range range;
+ u32 batch_asid;
+ bool suspended;
+};
+
+static DEFINE_IDA(vhost_vdpa_ida);
+
+static dev_t vhost_vdpa_major;
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb, u64 start,
+ u64 last, u32 asid);
+
+static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
+{
+ struct vhost_vdpa_as *as = container_of(iotlb, struct vhost_vdpa_as, iotlb);
+
+ return as->id;
+}
+
+static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ hlist_for_each_entry(as, head, hash_link)
+ if (as->id == asid)
+ return as;
+
+ return NULL;
+}
+
+static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return NULL;
+
+ return &as->iotlb;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+ struct vhost_vdpa_as *as;
+
+ if (asid_to_as(v, asid))
+ return NULL;
+
+ if (asid >= v->vdpa->nas)
+ return NULL;
+
+ as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (!as)
+ return NULL;
+
+ vhost_iotlb_init(&as->iotlb, 0, 0);
+ as->id = asid;
+ hlist_add_head(&as->hash_link, head);
+
+ return as;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
+ u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (as)
+ return as;
+
+ return vhost_vdpa_alloc_as(v, asid);
+}
+
+static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->reset_map)
+ ops->reset_map(vdpa, asid);
+}
+
+static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
+{
+ struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+ if (!as)
+ return -EINVAL;
+
+ hlist_del(&as->hash_link);
+ vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
+ /*
+ * Devices with a vendor-specific IOMMU may need to restore the
+ * iotlb to its initial or default state, which the full-range
+ * unmap call above cannot clean up. Give them a chance to
+ * clean up or reset the map to the desired state.
+ */
+ vhost_vdpa_reset_map(v, asid);
+ kfree(as);
+
+ return 0;
+}
+
+static void handle_vq_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+
+ ops->kick_vq(v->vdpa, vq - v->vqs);
+}
+
+static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
+{
+ struct vhost_virtqueue *vq = private;
+ struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
+
+ if (call_ctx)
+ eventfd_signal(call_ctx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vhost_vdpa_config_cb(void *private)
+{
+ struct vhost_vdpa *v = private;
+ struct eventfd_ctx *config_ctx = v->config_ctx;
+
+ if (config_ctx)
+ eventfd_signal(config_ctx);
+
+ return IRQ_HANDLED;
+}
+
+static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+ struct vdpa_device *vdpa = v->vdpa;
+ int ret, irq;
+
+ if (!ops->get_vq_irq)
+ return;
+
+ irq = ops->get_vq_irq(vdpa, qid);
+ if (irq < 0)
+ return;
+
+ if (!vq->call_ctx.ctx)
+ return;
+
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer,
+ vq->call_ctx.ctx, irq);
+ if (unlikely(ret))
+ dev_info(&v->dev, "vq %u, irq bypass producer (eventfd %p) registration failed, ret = %d\n",
+ qid, vq->call_ctx.ctx, ret);
+}
+
+static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+}
+
+static int _compat_vdpa_reset(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ u32 flags = 0;
+
+ v->suspended = false;
+
+ if (v->vdev.vqs) {
+ flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
+ VHOST_BACKEND_F_IOTLB_PERSIST) ?
+ VDPA_RESET_F_CLEAN_MAP : 0;
+ }
+
+ return vdpa_reset(vdpa, flags);
+}
+
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
+{
+ v->in_batch = 0;
+ return _compat_vdpa_reset(v);
+}
+
+static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!vdpa->use_va || !ops->bind_mm)
+ return 0;
+
+ return ops->bind_mm(vdpa, v->vdev.mm);
+}
+
+static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!vdpa->use_va || !ops->unbind_mm)
+ return;
+
+ ops->unbind_mm(vdpa);
+}
+
+static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 device_id;
+
+ device_id = ops->get_device_id(vdpa);
+
+ if (copy_to_user(argp, &device_id, sizeof(device_id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status;
+
+ status = ops->get_status(vdpa);
+
+ if (copy_to_user(statusp, &status, sizeof(status)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status, status_old;
+ u32 nvqs = v->nvqs;
+ int ret;
+ u16 i;
+
+ if (copy_from_user(&status, statusp, sizeof(status)))
+ return -EFAULT;
+
+ status_old = ops->get_status(vdpa);
+
+ /*
+ * Userspace shouldn't remove status bits unless it resets the
+ * status to 0.
+ */
+ if (status != 0 && (status_old & ~status) != 0)
+ return -EINVAL;
+
+ if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_unsetup_vq_irq(v, i);
+
+ if (status == 0) {
+ ret = _compat_vdpa_reset(v);
+ if (ret)
+ return ret;
+ } else
+ vdpa_set_status(vdpa, status);
+
+ if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_setup_vq_irq(v, i);
+
+ return 0;
+}
+
+static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
+ struct vhost_vdpa_config *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ size_t size = vdpa->config->get_config_size(vdpa);
+
+ if (c->len == 0 || c->off > size)
+ return -EINVAL;
+
+ if (c->len > size - c->off)
+ return -E2BIG;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+ buf = kvzalloc(config.len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ vdpa_get_config(vdpa, config.off, buf, config.len);
+
+ if (copy_to_user(c->buf, buf, config.len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ kvfree(buf);
+ return 0;
+}
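
struct vhost_vdpa_config ends in a flexible buf[] member, so userspace allocates the header and payload as one block; the kernel bounds-checks off/len against get_config_size() as shown above. A sketch of the calling convention (the helper name and error handling are illustrative only):

/* Sketch: read `len` bytes of config space at `off` via
 * VHOST_VDPA_GET_CONFIG. Assumes vdpa_fd is an open /dev/vhost-vdpa-N.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int read_config(int vdpa_fd, void *out, __u32 off, __u32 len)
{
	struct vhost_vdpa_config *c = malloc(sizeof(*c) + len);
	int r;

	if (!c)
		return -1;
	c->off = off;
	c->len = len;
	r = ioctl(vdpa_fd, VHOST_VDPA_GET_CONFIG, c);
	if (!r)
		memcpy(out, c->buf, len);
	free(c);
	return r;
}
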
+
+static long vhost_vdpa_set_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+
+ buf = vmemdup_user(c->buf, config.len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ vdpa_set_config(vdpa, config.off, buf, config.len);
+
+ kvfree(buf);
+ return 0;
+}
+
+static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->suspend;
+}
+
+static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->resume;
+}
+
+static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->get_vq_desc_group;
+}
+
+static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u64 features;
+
+ features = ops->get_device_features(vdpa);
+
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->get_backend_features)
+ return 0;
+ else
+ return ops->get_backend_features(vdpa);
+}
+
+static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
+ vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
+}
+
+static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_dev *d = &v->vdev;
+ u64 actual_features;
+ u64 features;
+ int i;
+
+ /*
+ * It's not allowed to change the features after they have
+ * been negotiated.
+ */
+ if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
+ return -EBUSY;
+
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+
+ if (vdpa_set_features(vdpa, features))
+ return -EINVAL;
+
+ /* let the vqs know what has been configured */
+ actual_features = ops->get_driver_features(vdpa);
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->acked_features = actual_features;
+ mutex_unlock(&vq->mutex);
+ }
+
+ return 0;
+}
+
+static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u16 num;
+
+ num = ops->get_vq_num_max(vdpa);
+
+ if (copy_to_user(argp, &num, sizeof(num)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void vhost_vdpa_config_put(struct vhost_vdpa *v)
+{
+ if (v->config_ctx) {
+ eventfd_ctx_put(v->config_ctx);
+ v->config_ctx = NULL;
+ }
+}
+
+static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vdpa_callback cb;
+ int fd;
+ struct eventfd_ctx *ctx;
+
+ cb.callback = vhost_vdpa_config_cb;
+ cb.private = v;
+ if (copy_from_user(&fd, argp, sizeof(fd)))
+ return -EFAULT;
+
+ ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
+ swap(ctx, v->config_ctx);
+
+ if (!IS_ERR_OR_NULL(ctx))
+ eventfd_ctx_put(ctx);
+
+ if (IS_ERR(v->config_ctx)) {
+ long ret = PTR_ERR(v->config_ctx);
+
+ v->config_ctx = NULL;
+ return ret;
+ }
+
+ v->vdpa->config->set_config_cb(v->vdpa, &cb);
+
+ return 0;
+}
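
The config-change interrupt reaches userspace through an eventfd registered here; passing VHOST_FILE_UNBIND (-1) later detaches it again. A sketch of the registration, assuming vdpa_fd is an open vhost-vdpa character device:

/* Sketch: wire up a config-change eventfd. Poll/read efd_out afterwards
 * to observe config interrupts.
 */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_config_call(int vdpa_fd, int *efd_out)
{
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	if (efd < 0)
		return -1;
	if (ioctl(vdpa_fd, VHOST_VDPA_SET_CONFIG_CALL, &efd) < 0)
		return -1;
	*efd_out = efd;
	return 0;
}
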
+
+static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vhost_vdpa_iova_range range = {
+ .first = v->range.first,
+ .last = v->range.last,
+ };
+
+ if (copy_to_user(argp, &range, sizeof(range)))
+ return -EFAULT;
+ return 0;
+}
+
+static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 size;
+
+ size = ops->get_config_size(vdpa);
+
+ if (copy_to_user(argp, &size, sizeof(size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* After a successful return of this ioctl the device must not process more
+ * virtqueue descriptors. The device can answer reads or writes of config
+ * fields as if it were not suspended. In particular, writing to "queue_enable"
+ * with a value of 1 will not make the device start processing buffers.
+ */
+static long vhost_vdpa_suspend(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ int ret;
+
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
+ if (!ops->suspend)
+ return -EOPNOTSUPP;
+
+ ret = ops->suspend(vdpa);
+ if (!ret)
+ v->suspended = true;
+
+ return ret;
+}
+
+/* After a successful return of this ioctl the device resumes processing
+ * virtqueue descriptors. The device becomes fully operational again, in the
+ * same state it was in before it was suspended.
+ */
+static long vhost_vdpa_resume(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ int ret;
+
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
+ if (!ops->resume)
+ return -EOPNOTSUPP;
+
+ ret = ops->resume(vdpa);
+ if (!ret)
+ v->suspended = false;
+
+ return ret;
+}
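
Taken together, suspend and resume let userspace quiesce the datapath, for example to read stable virtqueue state during migration. A sketch, under the assumption that the backend advertised VHOST_BACKEND_F_SUSPEND and VHOST_BACKEND_F_RESUME:

/* Sketch: pause descriptor processing around a vq-state read. */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int quiesce_and_resume(int vdpa_fd)
{
	if (ioctl(vdpa_fd, VHOST_VDPA_SUSPEND) < 0)
		return -1;
	/* ... safe to issue VHOST_GET_VRING_BASE for each vq here ... */
	return ioctl(vdpa_fd, VHOST_VDPA_RESUME);
}
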
+
+static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ void __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_vq_state vq_state;
+ struct vdpa_callback cb;
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_state s;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, (u32 __user *)argp);
+ if (r < 0)
+ return r;
+
+ if (idx >= v->nvqs)
+ return -ENOBUFS;
+
+ idx = array_index_nospec(idx, v->nvqs);
+ vq = &v->vqs[idx];
+
+ switch (cmd) {
+ case VHOST_VDPA_SET_VRING_ENABLE:
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ ops->set_vq_ready(vdpa, idx, s.num);
+ return 0;
+ case VHOST_VDPA_GET_VRING_GROUP:
+ if (!ops->get_vq_group)
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_group(vdpa, idx);
+ if (s.num >= vdpa->ngroups)
+ return -EIO;
+ else if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
+ case VHOST_VDPA_GET_VRING_DESC_GROUP:
+ if (!vhost_vdpa_has_desc_group(v))
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_desc_group(vdpa, idx);
+ if (s.num >= vdpa->ngroups)
+ return -EIO;
+ else if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
+ case VHOST_VDPA_SET_GROUP_ASID:
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ if (s.num >= vdpa->nas)
+ return -EINVAL;
+ if (!ops->set_group_asid)
+ return -EOPNOTSUPP;
+ return ops->set_group_asid(vdpa, idx, s.num);
+ case VHOST_VDPA_GET_VRING_SIZE:
+ if (!ops->get_vq_size)
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_size(vdpa, idx);
+ if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
+ case VHOST_GET_VRING_BASE:
+ r = ops->get_vq_state(v->vdpa, idx, &vq_state);
+ if (r)
+ return r;
+
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = vq_state.packed.last_avail_idx |
+ (vq_state.packed.last_avail_counter << 15);
+ vq->last_used_idx = vq_state.packed.last_used_idx |
+ (vq_state.packed.last_used_counter << 15);
+ } else {
+ vq->last_avail_idx = vq_state.split.avail_index;
+ }
+ break;
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx.ctx) {
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_unsetup_vq_irq(v, idx);
+ }
+ break;
+ }
+
+ r = vhost_vring_ioctl(&v->vdev, cmd, argp);
+ if (r)
+ return r;
+
+ switch (cmd) {
+ case VHOST_SET_VRING_ADDR:
+ if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+ return -EINVAL;
+
+ if (ops->set_vq_address(vdpa, idx,
+ (u64)(uintptr_t)vq->desc,
+ (u64)(uintptr_t)vq->avail,
+ (u64)(uintptr_t)vq->used))
+ r = -EINVAL;
+ break;
+
+ case VHOST_SET_VRING_BASE:
+ if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+ return -EINVAL;
+
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
+ vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
+ vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
+ vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
+ } else {
+ vq_state.split.avail_index = vq->last_avail_idx;
+ }
+ r = ops->set_vq_state(vdpa, idx, &vq_state);
+ break;
+
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx.ctx) {
+ cb.callback = vhost_vdpa_virtqueue_cb;
+ cb.private = vq;
+ cb.trigger = vq->call_ctx.ctx;
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_setup_vq_irq(v, idx);
+ } else {
+ cb.callback = NULL;
+ cb.private = NULL;
+ cb.trigger = NULL;
+ }
+ ops->set_vq_cb(vdpa, idx, &cb);
+ break;
+
+ case VHOST_SET_VRING_NUM:
+ ops->set_vq_num(vdpa, idx, vq->num);
+ break;
+ }
+
+ return r;
+}
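
For packed rings, both the VHOST_GET_VRING_BASE and VHOST_SET_VRING_BASE paths above squeeze a 15-bit descriptor index and a 1-bit wrap counter into each 16-bit value. The helpers below are a standalone restatement of that encoding, not kernel code:

/* Bits 0-14 carry the descriptor index, bit 15 the wrap counter. */
#include <stdbool.h>
#include <stdint.h>

static inline uint16_t pack_vq_state(uint16_t idx, bool wrap_counter)
{
	return (idx & 0x7fff) | ((uint16_t)wrap_counter << 15);
}

static inline void unpack_vq_state(uint16_t state, uint16_t *idx,
				   bool *wrap_counter)
{
	*idx = state & 0x7fff;
	*wrap_counter = !!(state & 0x8000);
}
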
+
+static long vhost_vdpa_unlocked_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ u64 features;
+ long r = 0;
+
+ if (cmd == VHOST_SET_BACKEND_FEATURES) {
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+ BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
+ BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
+ BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
+ BIT_ULL(VHOST_BACKEND_F_RESUME) |
+ BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
+ !vhost_vdpa_can_suspend(v))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
+ !vhost_vdpa_can_resume(v))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+ !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
+ return -EINVAL;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+ !vhost_vdpa_has_desc_group(v))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
+ !vhost_vdpa_has_persistent_map(v))
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&v->vdev, features);
+ return 0;
+ }
+
+ mutex_lock(&d->mutex);
+
+ switch (cmd) {
+ case VHOST_VDPA_GET_DEVICE_ID:
+ r = vhost_vdpa_get_device_id(v, argp);
+ break;
+ case VHOST_VDPA_GET_STATUS:
+ r = vhost_vdpa_get_status(v, argp);
+ break;
+ case VHOST_VDPA_SET_STATUS:
+ r = vhost_vdpa_set_status(v, argp);
+ break;
+ case VHOST_VDPA_GET_CONFIG:
+ r = vhost_vdpa_get_config(v, argp);
+ break;
+ case VHOST_VDPA_SET_CONFIG:
+ r = vhost_vdpa_set_config(v, argp);
+ break;
+ case VHOST_GET_FEATURES:
+ r = vhost_vdpa_get_features(v, argp);
+ break;
+ case VHOST_SET_FEATURES:
+ r = vhost_vdpa_set_features(v, argp);
+ break;
+ case VHOST_VDPA_GET_VRING_NUM:
+ r = vhost_vdpa_get_vring_num(v, argp);
+ break;
+ case VHOST_VDPA_GET_GROUP_NUM:
+ if (copy_to_user(argp, &v->vdpa->ngroups,
+ sizeof(v->vdpa->ngroups)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_AS_NUM:
+ if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
+ r = -EFAULT;
+ break;
+ case VHOST_SET_LOG_BASE:
+ case VHOST_SET_LOG_FD:
+ r = -ENOIOCTLCMD;
+ break;
+ case VHOST_VDPA_SET_CONFIG_CALL:
+ r = vhost_vdpa_set_config_call(v, argp);
+ break;
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VDPA_BACKEND_FEATURES;
+ if (vhost_vdpa_can_suspend(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
+ if (vhost_vdpa_can_resume(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+ if (vhost_vdpa_has_desc_group(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
+ if (vhost_vdpa_has_persistent_map(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
+ features |= vhost_vdpa_get_backend_features(v);
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_IOVA_RANGE:
+ r = vhost_vdpa_get_iova_range(v, argp);
+ break;
+ case VHOST_VDPA_GET_CONFIG_SIZE:
+ r = vhost_vdpa_get_config_size(v, argp);
+ break;
+ case VHOST_VDPA_GET_VQS_COUNT:
+ r = vhost_vdpa_get_vqs_count(v, argp);
+ break;
+ case VHOST_VDPA_SUSPEND:
+ r = vhost_vdpa_suspend(v);
+ break;
+ case VHOST_VDPA_RESUME:
+ r = vhost_vdpa_resume(v);
+ break;
+ default:
+ r = vhost_dev_ioctl(&v->vdev, cmd, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vdpa_vring_ioctl(v, cmd, argp);
+ break;
+ }
+
+ if (r)
+ goto out;
+
+ switch (cmd) {
+ case VHOST_SET_OWNER:
+ r = vhost_vdpa_bind_mm(v);
+ if (r)
+ vhost_dev_reset_owner(d, NULL);
+ break;
+ }
+out:
+ mutex_unlock(&d->mutex);
+ return r;
+}
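
Backend-feature negotiation is the one path above handled without taking the device mutex. From userspace the handshake is get, mask, set; a sketch (the helper name is illustrative):

/* Sketch: negotiate backend features by masking what the kernel offers,
 * never requesting more than was advertised.
 */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int negotiate_backend_features(int vdpa_fd, __u64 wanted)
{
	__u64 features;

	if (ioctl(vdpa_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
		return -1;
	features &= wanted;
	return ioctl(vdpa_fd, VHOST_SET_BACKEND_FEATURES, &features);
}
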
+
+static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb_map *map, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->dma_map) {
+ ops->dma_unmap(vdpa, asid, map->start, map->size);
+ } else if (ops->set_map == NULL) {
+ iommu_unmap(v->domain, map->start, map->size);
+ }
+}
+
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 start, u64 last, u32 asid)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb_map *map;
+ struct page *page;
+ unsigned long pfn, pinned;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ pinned = PFN_DOWN(map->size);
+ for (pfn = PFN_DOWN(map->addr);
+ pinned > 0; pfn++, pinned--) {
+ page = pfn_to_page(pfn);
+ if (map->perm & VHOST_ACCESS_WO)
+ set_page_dirty_lock(page);
+ unpin_user_page(page);
+ }
+ atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+ vhost_vdpa_general_unmap(v, map, asid);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 start, u64 last, u32 asid)
+{
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_vdpa_general_unmap(v, map, asid);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb, u64 start,
+ u64 last, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
+
+ return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
+}
+
+static int perm_to_iommu_flags(u32 perm)
+{
+ int flags = 0;
+
+ switch (perm) {
+ case VHOST_ACCESS_WO:
+ flags |= IOMMU_WRITE;
+ break;
+ case VHOST_ACCESS_RO:
+ flags |= IOMMU_READ;
+ break;
+ case VHOST_ACCESS_RW:
+ flags |= (IOMMU_WRITE | IOMMU_READ);
+ break;
+ default:
+ WARN(1, "invalid vhost IOTLB permission\n");
+ break;
+ }
+
+ return flags | IOMMU_CACHE;
+}
+
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
+ int r = 0;
+
+ r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
+ pa, perm, opaque);
+ if (r)
+ return r;
+
+ if (ops->dma_map) {
+ r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
+ } else if (ops->set_map) {
+ if (!v->in_batch)
+ r = ops->set_map(vdpa, asid, iotlb);
+ } else {
+ r = iommu_map(v->domain, iova, pa, size,
+ perm_to_iommu_flags(perm),
+ GFP_KERNEL_ACCOUNT);
+ }
+ if (r) {
+ vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
+ return r;
+ }
+
+ if (!vdpa->use_va)
+ atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
+
+ return 0;
+}
+
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 asid = iotlb_to_asid(iotlb);
+
+ vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
+
+ if (ops->set_map) {
+ if (!v->in_batch)
+ ops->set_map(vdpa, asid, iotlb);
+ }
+}
+
+static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ u64 offset, map_size, map_iova = iova;
+ struct vdpa_map_file *map_file;
+ struct vm_area_struct *vma;
+ int ret = 0;
+
+ mmap_read_lock(dev->mm);
+
+ while (size) {
+ vma = find_vma(dev->mm, uaddr);
+ if (!vma) {
+ ret = -EINVAL;
+ break;
+ }
+ map_size = min(size, vma->vm_end - uaddr);
+ if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
+ !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
+ goto next;
+
+ map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
+ if (!map_file) {
+ ret = -ENOMEM;
+ break;
+ }
+ offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
+ map_file->offset = offset;
+ map_file->file = get_file(vma->vm_file);
+ ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
+ perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ break;
+ }
+next:
+ size -= map_size;
+ uaddr += map_size;
+ map_iova += map_size;
+ }
+ if (ret)
+ vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
+
+ mmap_read_unlock(dev->mm);
+
+ return ret;
+}
+
+static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct page **page_list;
+ unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ unsigned int gup_flags = FOLL_LONGTERM;
+ unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+ unsigned long lock_limit, sz2pin, nchunks, i;
+ u64 start = iova;
+ long pinned;
+ int ret = 0;
+
+ /* Limit the use of memory for bookkeeping */
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ if (perm & VHOST_ACCESS_WO)
+ gup_flags |= FOLL_WRITE;
+
+ npages = PFN_UP(size + (iova & ~PAGE_MASK));
+ if (!npages) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ mmap_read_lock(dev->mm);
+
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
+ if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ cur_base = uaddr & PAGE_MASK;
+ iova &= PAGE_MASK;
+ nchunks = 0;
+
+ while (npages) {
+ sz2pin = min_t(unsigned long, npages, list_size);
+ pinned = pin_user_pages(cur_base, sz2pin,
+ gup_flags, page_list);
+ if (sz2pin != pinned) {
+ if (pinned < 0) {
+ ret = pinned;
+ } else {
+ unpin_user_pages(page_list, pinned);
+ ret = -ENOMEM;
+ }
+ goto out;
+ }
+ nchunks++;
+
+ if (!last_pfn)
+ map_pfn = page_to_pfn(page_list[0]);
+
+ for (i = 0; i < pinned; i++) {
+ unsigned long this_pfn = page_to_pfn(page_list[i]);
+ u64 csize;
+
+ if (last_pfn && (this_pfn != last_pfn + 1)) {
+ /* Pin a contiguous chunk of memory */
+ csize = PFN_PHYS(last_pfn - map_pfn + 1);
+ ret = vhost_vdpa_map(v, iotlb, iova, csize,
+ PFN_PHYS(map_pfn),
+ perm, NULL);
+ if (ret) {
+ /*
+ * Unpin the pages that are left unmapped
+ * from this point on in the current
+ * page_list. The remaining outstanding
+ * ones which may stride across several
+ * chunks will be covered in the common
+ * error path subsequently.
+ */
+ unpin_user_pages(&page_list[i],
+ pinned - i);
+ goto out;
+ }
+
+ map_pfn = this_pfn;
+ iova += csize;
+ nchunks = 0;
+ }
+
+ last_pfn = this_pfn;
+ }
+
+ cur_base += PFN_PHYS(pinned);
+ npages -= pinned;
+ }
+
+ /* Pin the remaining chunk */
+ ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ PFN_PHYS(map_pfn), perm, NULL);
+out:
+ if (ret) {
+ if (nchunks) {
+ unsigned long pfn;
+
+ /*
+ * Unpin the outstanding pages that should have been
+ * mapped but were not, due to a vdpa_map() or
+ * pin_user_pages() failure.
+ *
+ * Mapped pages are accounted in vdpa_map(), hence
+ * the corresponding unpinning will be handled by
+ * vdpa_unmap().
+ */
+ WARN_ON(!last_pfn);
+ for (pfn = map_pfn; pfn <= last_pfn; pfn++)
+ unpin_user_page(pfn_to_page(pfn));
+ }
+ vhost_vdpa_unmap(v, iotlb, start, size);
+ }
+unlock:
+ mmap_read_unlock(dev->mm);
+free:
+ free_page((unsigned long)page_list);
+ return ret;
+}
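
The pinning loop above coalesces runs of physically contiguous pfns so that each run costs a single vhost_vdpa_map() call. The chunking idea in isolation, with a hypothetical emit_map() callback standing in for vhost_vdpa_map() (illustration only, not kernel code):

/* Walk a pfn array and emit one (start_pfn, npages) mapping per
 * physically contiguous run.
 */
#include <stddef.h>

static void coalesce_pfns(const unsigned long *pfns, size_t n,
			  void (*emit_map)(unsigned long start_pfn,
					   size_t npages))
{
	size_t run_start = 0;
	size_t i;

	for (i = 1; i <= n; i++) {
		/* close the run at the end of input or on a discontinuity */
		if (i == n || pfns[i] != pfns[i - 1] + 1) {
			emit_map(pfns[run_start], i - run_start);
			run_start = i;
		}
	}
}
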
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (msg->iova < v->range.first || !msg->size ||
+ msg->iova > U64_MAX - msg->size + 1 ||
+ msg->iova + msg->size - 1 > v->range.last)
+ return -EINVAL;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
+ msg->uaddr, msg->perm);
+
+ return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
+ msg->perm);
+}
+
+static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_iotlb *iotlb = NULL;
+ struct vhost_vdpa_as *as = NULL;
+ int r = 0;
+
+ mutex_lock(&dev->mutex);
+
+ r = vhost_dev_check_owner(dev);
+ if (r)
+ goto unlock;
+
+ if (msg->type == VHOST_IOTLB_UPDATE ||
+ msg->type == VHOST_IOTLB_BATCH_BEGIN) {
+ as = vhost_vdpa_find_alloc_as(v, asid);
+ if (!as) {
+ dev_err(&v->dev, "can't find or allocate asid %d\n",
+ asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+ iotlb = &as->iotlb;
+ } else
+ iotlb = asid_to_iotlb(v, asid);
+
+ if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
+ if (v->in_batch && v->batch_asid != asid) {
+ dev_info(&v->dev, "batch id %d asid %d\n",
+ v->batch_asid, asid);
+ }
+ if (!iotlb)
+ dev_err(&v->dev, "no iotlb for asid %d\n", asid);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
+ break;
+ case VHOST_IOTLB_BATCH_BEGIN:
+ v->batch_asid = asid;
+ v->in_batch = true;
+ break;
+ case VHOST_IOTLB_BATCH_END:
+ if (v->in_batch && ops->set_map)
+ ops->set_map(vdpa, asid, iotlb);
+ v->in_batch = false;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+unlock:
+ mutex_unlock(&dev->mutex);
+
+ return r;
+}
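
These IOTLB messages do not arrive via ioctl; userspace write()s struct vhost_msg_v2 records to the character device (see vhost_vdpa_chr_write_iter() below), with the target ASID carried in the v2 header. A sketch of a batched mapping, assuming VHOST_BACKEND_F_IOTLB_MSG_V2 and VHOST_BACKEND_F_IOTLB_BATCH were negotiated:

/* Sketch: send one IOTLB message in the v2 format, then use it to map a
 * region inside a batch.
 */
#include <string.h>
#include <unistd.h>
#include <linux/vhost.h>

static int send_iotlb(int vdpa_fd, __u32 asid, __u8 type,
		      __u64 iova, __u64 size, __u64 uaddr, __u8 perm)
{
	struct vhost_msg_v2 msg;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.asid = asid;
	msg.iotlb.iova = iova;
	msg.iotlb.size = size;
	msg.iotlb.uaddr = uaddr;
	msg.iotlb.perm = perm;
	msg.iotlb.type = type;
	return write(vdpa_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}

/* usage: BATCH_BEGIN, one or more UPDATEs, then BATCH_END */
static int map_region(int vdpa_fd, __u64 iova, __u64 size, __u64 uaddr)
{
	if (send_iotlb(vdpa_fd, 0, VHOST_IOTLB_BATCH_BEGIN, 0, 0, 0, 0) ||
	    send_iotlb(vdpa_fd, 0, VHOST_IOTLB_UPDATE, iova, size, uaddr,
		       VHOST_ACCESS_RW) ||
	    send_iotlb(vdpa_fd, 0, VHOST_IOTLB_BATCH_END, 0, 0, 0, 0))
		return -1;
	return 0;
}
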
+
+static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vdpa *v = file->private_data;
+ struct vhost_dev *dev = &v->vdev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ union virtio_map map = vdpa_get_map(vdpa);
+ struct device *dma_dev = map.dma_dev;
+ int ret;
+
+ /* Device wants to do DMA by itself */
+ if (ops->set_map || ops->dma_map)
+ return 0;
+
+ if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
+ dev_warn_once(&v->dev,
+ "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
+ return -ENOTSUPP;
+ }
+
+ v->domain = iommu_paging_domain_alloc(dma_dev);
+ if (IS_ERR(v->domain)) {
+ ret = PTR_ERR(v->domain);
+ v->domain = NULL;
+ return ret;
+ }
+
+ ret = iommu_attach_device(v->domain, dma_dev);
+ if (ret)
+ goto err_attach;
+
+ return 0;
+
+err_attach:
+ iommu_domain_free(v->domain);
+ v->domain = NULL;
+ return ret;
+}
+
+static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ union virtio_map map = vdpa_get_map(vdpa);
+ struct device *dma_dev = map.dma_dev;
+
+ if (v->domain) {
+ iommu_detach_device(v->domain, dma_dev);
+ iommu_domain_free(v->domain);
+ }
+
+ v->domain = NULL;
+}
+
+static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
+{
+ struct vdpa_iova_range *range = &v->range;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_iova_range) {
+ *range = ops->get_iova_range(vdpa);
+ } else if (v->domain && v->domain->geometry.force_aperture) {
+ range->first = v->domain->geometry.aperture_start;
+ range->last = v->domain->geometry.aperture_end;
+ } else {
+ range->first = 0;
+ range->last = ULLONG_MAX;
+ }
+}
+
+static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+{
+ struct vhost_vdpa_as *as;
+ u32 asid;
+
+ for (asid = 0; asid < v->vdpa->nas; asid++) {
+ as = asid_to_as(v, asid);
+ if (as)
+ vhost_vdpa_remove_as(v, asid);
+ }
+
+ vhost_vdpa_free_domain(v);
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
+ v->vdev.vqs = NULL;
+}
+
+static int vhost_vdpa_open(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v;
+ struct vhost_dev *dev;
+ struct vhost_virtqueue **vqs;
+ int r, opened;
+ u32 i, nvqs;
+
+ v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
+
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (opened)
+ return -EBUSY;
+
+ nvqs = v->nvqs;
+ r = vhost_vdpa_reset(v);
+ if (r)
+ goto err;
+
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ dev = &v->vdev;
+ for (i = 0; i < nvqs; i++) {
+ vqs[i] = &v->vqs[i];
+ vqs[i]->handle_kick = handle_vq_kick;
+ vqs[i]->call_ctx.ctx = NULL;
+ }
+ vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
+ vhost_vdpa_process_iotlb_msg);
+
+ r = vhost_vdpa_alloc_domain(v);
+ if (r)
+ goto err_alloc_domain;
+
+ vhost_vdpa_set_iova_range(v);
+
+ filep->private_data = v;
+
+ return 0;
+
+err_alloc_domain:
+ vhost_vdpa_cleanup(v);
+err:
+ atomic_dec(&v->opened);
+ return r;
+}
+
+static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+{
+ u32 i;
+
+ for (i = 0; i < v->nvqs; i++)
+ vhost_vdpa_unsetup_vq_irq(v, i);
+}
+
+static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+
+ mutex_lock(&d->mutex);
+ filep->private_data = NULL;
+ vhost_vdpa_clean_irq(v);
+ vhost_vdpa_reset(v);
+ vhost_dev_stop(&v->vdev);
+ vhost_vdpa_unbind_mm(v);
+ vhost_vdpa_config_put(v);
+ vhost_vdpa_cleanup(v);
+ mutex_unlock(&d->mutex);
+
+ atomic_dec(&v->opened);
+ complete(&v->completion);
+
+ return 0;
+}
+
+#ifdef CONFIG_MMU
+static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+{
+ struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_notification_area notify;
+ struct vm_area_struct *vma = vmf->vma;
+ u16 index = vma->vm_pgoff;
+
+ notify = ops->get_vq_notification(vdpa, index);
+
+ return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
+}
+
+static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+ .fault = vhost_vdpa_fault,
+};
+
+static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vhost_vdpa *v = vma->vm_file->private_data;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_notification_area notify;
+ unsigned long index = vma->vm_pgoff;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (vma->vm_flags & VM_READ)
+ return -EINVAL;
+ if (index > 65535)
+ return -EINVAL;
+ if (!ops->get_vq_notification)
+ return -ENOTSUPP;
+
+ /* To be safe and easily modelled by userspace, we only
+ * support a doorbell that sits on a page boundary and
+ * does not share the page with other registers.
+ */
+ notify = ops->get_vq_notification(vdpa, index);
+ if (notify.addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+ if (vma->vm_end - vma->vm_start != notify.size)
+ return -ENOTSUPP;
+
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_ops = &vhost_vdpa_vm_ops;
+ return 0;
+}
+#endif /* CONFIG_MMU */
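
Userspace selects which virtqueue doorbell to map through vm_pgoff and must map it write-only, since VM_READ is rejected above. A sketch of kicking a queue through the mapped page; writing the vq index mirrors the virtio-pci notify convention and is an assumption about the underlying device:

/* Sketch: map the notification page of virtqueue `index` and kick it.
 * Assumes the parent vdpa device exposes page-aligned doorbells
 * (otherwise mmap() fails, as in vhost_vdpa_mmap() above).
 */
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static int kick_vq_doorbell(int vdpa_fd, unsigned long index)
{
	long page = sysconf(_SC_PAGESIZE);
	void *db = mmap(NULL, page, PROT_WRITE, MAP_SHARED,
			vdpa_fd, index * page);

	if (db == MAP_FAILED)
		return -1;
	*(volatile uint16_t *)db = index;	/* assumed notify format */
	munmap(db, page);
	return 0;
}
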
+
+static const struct file_operations vhost_vdpa_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vdpa_open,
+ .release = vhost_vdpa_release,
+ .write_iter = vhost_vdpa_chr_write_iter,
+ .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
+#ifdef CONFIG_MMU
+ .mmap = vhost_vdpa_mmap,
+#endif /* CONFIG_MMU */
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static void vhost_vdpa_release_dev(struct device *device)
+{
+ struct vhost_vdpa *v =
+ container_of(device, struct vhost_vdpa, dev);
+
+ ida_free(&vhost_vdpa_ida, v->minor);
+ kfree(v->vqs);
+ kfree(v);
+}
+
+static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+{
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa *v;
+ int minor;
+ int i, r;
+
+ /* We can't support a platform IOMMU device with more than 1
+ * group or address space (as)
+ */
+ if (!ops->set_map && !ops->dma_map &&
+ (vdpa->ngroups > 1 || vdpa->nas > 1))
+ return -EOPNOTSUPP;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!v)
+ return -ENOMEM;
+
+ minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1,
+ GFP_KERNEL);
+ if (minor < 0) {
+ kfree(v);
+ return minor;
+ }
+
+ atomic_set(&v->opened, 0);
+ v->minor = minor;
+ v->vdpa = vdpa;
+ v->nvqs = vdpa->nvqs;
+ v->virtio_id = ops->get_device_id(vdpa);
+
+ device_initialize(&v->dev);
+ v->dev.release = vhost_vdpa_release_dev;
+ v->dev.parent = &vdpa->dev;
+ v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
+ v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
+ GFP_KERNEL);
+ if (!v->vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
+ if (r)
+ goto err;
+
+ cdev_init(&v->cdev, &vhost_vdpa_fops);
+ v->cdev.owner = THIS_MODULE;
+
+ r = cdev_device_add(&v->cdev, &v->dev);
+ if (r)
+ goto err;
+
+ init_completion(&v->completion);
+ vdpa_set_drvdata(vdpa, v);
+
+ for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
+ INIT_HLIST_HEAD(&v->as[i]);
+
+ return 0;
+
+err:
+ put_device(&v->dev);
+ return r;
+}
+
+static void vhost_vdpa_remove(struct vdpa_device *vdpa)
+{
+ struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
+ int opened;
+
+ cdev_device_del(&v->cdev, &v->dev);
+
+ do {
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (!opened)
+ break;
+ wait_for_completion(&v->completion);
+ } while (1);
+
+ put_device(&v->dev);
+}
+
+static struct vdpa_driver vhost_vdpa_driver = {
+ .driver = {
+ .name = "vhost_vdpa",
+ },
+ .probe = vhost_vdpa_probe,
+ .remove = vhost_vdpa_remove,
+};
+
+static int __init vhost_vdpa_init(void)
+{
+ int r;
+
+ r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
+ "vhost-vdpa");
+ if (r)
+ goto err_alloc_chrdev;
+
+ r = vdpa_register_driver(&vhost_vdpa_driver);
+ if (r)
+ goto err_vdpa_register_driver;
+
+ return 0;
+
+err_vdpa_register_driver:
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+err_alloc_chrdev:
+ return r;
+}
+module_init(vhost_vdpa_init);
+
+static void __exit vhost_vdpa_exit(void)
+{
+ vdpa_unregister_driver(&vhost_vdpa_driver);
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+}
+module_exit(vhost_vdpa_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e58cf0001cee..bccdc9eab267 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
* Copyright (C) 2006 Rusty Russell IBM Corporation
*
@@ -6,36 +7,157 @@
* Inspiration, some code, and most witty comments come from
* Documentation/virtual/lguest/lguest.c, by Rusty Russell
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* Generic code for virtio server in host kernel.
*/
#include <linux/eventfd.h>
#include <linux/vhost.h>
-#include <linux/socket.h> /* memcpy_fromiovec */
+#include <linux/uio.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
-#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
+#include <linux/sort.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/vhost_task.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/nospec.h>
+#include <linux/kcov.h>
#include "vhost.h"
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+ "Maximum number of memory regions in memory map. (default: 64)");
+static int max_iotlb_entries = 2048;
+module_param(max_iotlb_entries, int, 0444);
+MODULE_PARM_DESC(max_iotlb_entries,
+ "Maximum number of iotlb entries. (default: 2048)");
+static bool fork_from_owner_default = VHOST_FORK_OWNER_TASK;
+
+#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
+module_param(fork_from_owner_default, bool, 0444);
+MODULE_PARM_DESC(fork_from_owner_default,
+ "Set task mode as the default(default: Y)");
+#endif
+
enum {
- VHOST_MEMORY_MAX_NREGIONS = 64,
VHOST_MEMORY_F_LOG = 0x1,
};
-#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
-#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
+
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
+{
+ vq->user_be = !virtio_legacy_is_little_endian();
+}
+
+static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
+{
+ vq->user_be = true;
+}
+
+static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
+{
+ vq->user_be = false;
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+ struct vhost_vring_state s;
+
+ if (vq->private_data)
+ return -EBUSY;
+
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+
+ if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
+ s.num != VHOST_VRING_BIG_ENDIAN)
+ return -EINVAL;
+
+ if (s.num == VHOST_VRING_BIG_ENDIAN)
+ vhost_enable_cross_endian_big(vq);
+ else
+ vhost_enable_cross_endian_little(vq);
+
+ return 0;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+ int __user *argp)
+{
+ struct vhost_vring_state s = {
+ .index = idx,
+ .num = vq->user_be
+ };
+
+ if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+ /* Note for legacy virtio: user_be is initialized at reset time
+ * according to the host endianness. If userspace does not set an
+ * explicit endianness, the default behavior is native endian, as
+ * expected by legacy virtio.
+ */
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
+}
+#else
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
+{
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+ int __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+ || virtio_legacy_is_little_endian();
+}
+#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
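
When CONFIG_VHOST_CROSS_ENDIAN_LEGACY is set, userspace picks the legacy-virtio byte order per ring before a backend is attached; afterwards the ioctl fails with -EBUSY, since private_data is non-NULL. A sketch:

/* Sketch: request big-endian legacy-virtio handling for ring 0, before
 * the backend is attached.
 */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_ring_big_endian(int vhost_fd)
{
	struct vhost_vring_state s = {
		.index = 0,
		.num = VHOST_VRING_BIG_ENDIAN,
	};

	return ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s);
}
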
+
+static void vhost_reset_is_le(struct vhost_virtqueue *vq)
+{
+ vhost_init_is_le(vq);
+}
+
+struct vhost_flush_struct {
+ struct vhost_work work;
+ struct completion wait_event;
+};
+
+static void vhost_flush_work(struct vhost_work *work)
+{
+ struct vhost_flush_struct *s;
+
+ s = container_of(work, struct vhost_flush_struct, work);
+ complete(&s->wait_event);
+}
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
@@ -47,37 +169,41 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
add_wait_queue(wqh, &poll->wait);
}
-static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
+static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
void *key)
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+ struct vhost_work *work = &poll->work;
- if (!((unsigned long)key & poll->mask))
+ if (!(key_to_poll(key) & poll->mask))
return 0;
- vhost_poll_queue(poll);
+ if (!poll->dev->use_worker)
+ work->fn(work);
+ else
+ vhost_poll_queue(poll);
+
return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
- INIT_LIST_HEAD(&work->node);
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
work->fn = fn;
- init_waitqueue_head(&work->done);
- work->flushing = 0;
- work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev)
+ __poll_t mask, struct vhost_dev *dev,
+ struct vhost_virtqueue *vq)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
poll->wqh = NULL;
+ poll->vq = vq;
vhost_work_init(&poll->work, fn);
}
@@ -87,22 +213,20 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
* keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
- unsigned long mask;
- int ret = 0;
+ __poll_t mask;
if (poll->wqh)
return 0;
- mask = file->f_op->poll(file, &poll->table);
+ mask = vfs_poll(file, &poll->table);
if (mask)
- vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
- if (mask & POLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
- ret = -EINVAL;
+ vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
+ if (mask & EPOLLERR) {
+ vhost_poll_stop(poll);
+ return -EINVAL;
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
@@ -117,62 +241,129 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
- unsigned seq)
+static void vhost_worker_queue(struct vhost_worker *worker,
+ struct vhost_work *work)
+{
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+ * sure it was not in the list.
+ * test_and_set_bit() implies a memory barrier.
+ */
+ llist_add(&work->node, &worker->work_list);
+ worker->ops->wakeup(worker);
+ }
+}
+
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{
- int left;
+ struct vhost_worker *worker;
+ bool queued = false;
+
+ rcu_read_lock();
+ worker = rcu_dereference(vq->worker);
+ if (worker) {
+ queued = true;
+ vhost_worker_queue(worker, work);
+ }
+ rcu_read_unlock();
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq;
- spin_unlock_irq(&dev->work_lock);
- return left <= 0;
+ return queued;
}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
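
For in-kernel callers, queueing is a two-step pattern: initialize the vhost_work with its handler once, then hand it to the vq's current worker. A minimal sketch, with the handler name and trigger site invented for illustration:

/* Sketch: queue a work item on whatever worker vq is attached to. */
static void example_handler(struct vhost_work *work)
{
        /* Runs in worker context with the owner's mm made current. */
}

static void example_queue(struct vhost_virtqueue *vq)
{
        static struct vhost_work example_work;

        vhost_work_init(&example_work, example_handler);
        if (!vhost_vq_work_queue(vq, &example_work))
                pr_debug("vq has no worker; work dropped\n");
}

Because VHOST_WORK_QUEUED is tested-and-set on queueing, re-queueing an already pending item is a no-op, so the same work can safely be queued from multiple paths.
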
-void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+/**
+ * __vhost_worker_flush - flush a worker
+ * @worker: worker to flush
+ *
+ * The worker's flush_mutex must be held.
+ */
+static void __vhost_worker_flush(struct vhost_worker *worker)
{
- unsigned seq;
- int flushing;
+ struct vhost_flush_struct flush;
- spin_lock_irq(&dev->work_lock);
- seq = work->queue_seq;
- work->flushing++;
- spin_unlock_irq(&dev->work_lock);
- wait_event(work->done, vhost_work_seq_done(dev, work, seq));
- spin_lock_irq(&dev->work_lock);
- flushing = --work->flushing;
- spin_unlock_irq(&dev->work_lock);
- BUG_ON(flushing < 0);
+ if (!worker->attachment_cnt || worker->killed)
+ return;
+
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
+
+ vhost_worker_queue(worker, &flush.work);
+ /*
+ * Drop mutex in case our worker is killed and it needs to take the
+ * mutex to force cleanup.
+ */
+ mutex_unlock(&worker->mutex);
+ wait_for_completion(&flush.wait_event);
+ mutex_lock(&worker->mutex);
}
-EXPORT_SYMBOL_GPL(vhost_work_flush);
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_worker_flush(struct vhost_worker *worker)
{
- vhost_work_flush(poll->dev, &poll->work);
+ mutex_lock(&worker->mutex);
+ __vhost_worker_flush(worker);
+ mutex_unlock(&worker->mutex);
}
-EXPORT_SYMBOL_GPL(vhost_poll_flush);
-void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_dev_flush(struct vhost_dev *dev)
{
- unsigned long flags;
+ struct vhost_worker *worker;
+ unsigned long i;
- spin_lock_irqsave(&dev->work_lock, flags);
- if (list_empty(&work->node)) {
- list_add_tail(&work->node, &dev->work_list);
- work->queue_seq++;
- wake_up_process(dev->worker);
- }
- spin_unlock_irqrestore(&dev->work_lock, flags);
+ xa_for_each(&dev->worker_xa, i, worker)
+ vhost_worker_flush(worker);
}
-EXPORT_SYMBOL_GPL(vhost_work_queue);
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
+
+/* A lockless hint for busy polling code to exit the loop */
+bool vhost_vq_has_work(struct vhost_virtqueue *vq)
+{
+ struct vhost_worker *worker;
+ bool has_work = false;
+
+ rcu_read_lock();
+ worker = rcu_dereference(vq->worker);
+ if (worker && !llist_empty(&worker->work_list))
+ has_work = true;
+ rcu_read_unlock();
+
+ return has_work;
+}
+EXPORT_SYMBOL_GPL(vhost_vq_has_work);
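
Busy-polling backends use this hint to bail out of their spin loop when queued work is waiting. A simplified sketch of the loop shape in drivers/vhost/net.c — the jiffies-based deadline here is an illustration; net.c actually budgets with local_clock():

/* Sketch: spin briefly for new avail entries, yielding to queued work. */
static void example_busy_poll(struct vhost_virtqueue *vq,
                              unsigned long endtime)
{
        while (vhost_vq_avail_empty(vq->dev, vq) &&
               !vhost_vq_has_work(vq) &&
               !time_after(jiffies, endtime))
                cpu_relax();
}
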
void vhost_poll_queue(struct vhost_poll *poll)
{
- vhost_work_queue(poll->dev, &poll->work);
+ vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
+static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
+{
+ int j;
+
+ for (j = 0; j < VHOST_NUM_ADDRS; j++)
+ vq->meta_iotlb[j] = NULL;
+}
+
+static void vhost_vq_meta_reset(struct vhost_dev *d)
+{
+ int i;
+
+ for (i = 0; i < d->nvqs; ++i)
+ __vhost_vq_meta_reset(d->vqs[i]);
+}
+
+static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+{
+ call_ctx->ctx = NULL;
+ memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
+}
+
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
+{
+ return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -181,6 +372,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
+ vq->next_avail_head = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
@@ -189,64 +381,119 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->log_used = false;
vq->log_addr = -1ull;
vq->private_data = NULL;
+ virtio_features_zero(vq->acked_features_array);
+ vq->acked_backend_features = 0;
vq->log_base = NULL;
vq->error_ctx = NULL;
- vq->error = NULL;
vq->kick = NULL;
- vq->call_ctx = NULL;
- vq->call = NULL;
vq->log_ctx = NULL;
+ vhost_disable_cross_endian(vq);
+ vhost_reset_is_le(vq);
+ vq->busyloop_timeout = 0;
+ vq->umem = NULL;
+ vq->iotlb = NULL;
+ rcu_assign_pointer(vq->worker, NULL);
+ vhost_vring_call_reset(&vq->call_ctx);
+ __vhost_vq_meta_reset(vq);
}
-static int vhost_worker(void *data)
+static int vhost_run_work_kthread_list(void *data)
{
- struct vhost_dev *dev = data;
- struct vhost_work *work = NULL;
- unsigned uninitialized_var(seq);
- mm_segment_t oldfs = get_fs();
+ struct vhost_worker *worker = data;
+ struct vhost_work *work, *work_next;
+ struct vhost_dev *dev = worker->dev;
+ struct llist_node *node;
- set_fs(USER_DS);
- use_mm(dev->mm);
+ kthread_use_mm(dev->mm);
for (;;) {
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irq(&dev->work_lock);
- if (work) {
- work->done_seq = seq;
- if (work->flushing)
- wake_up_all(&work->done);
- }
-
if (kthread_should_stop()) {
- spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
break;
}
- if (!list_empty(&dev->work_list)) {
- work = list_first_entry(&dev->work_list,
- struct vhost_work, node);
- list_del_init(&work->node);
- seq = work->queue_seq;
- } else
- work = NULL;
- spin_unlock_irq(&dev->work_lock);
+ node = llist_del_all(&worker->work_list);
+ if (!node)
+ schedule();
- if (work) {
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
__set_current_state(TASK_RUNNING);
+ kcov_remote_start_common(worker->kcov_handle);
work->fn(work);
- if (need_resched())
- schedule();
- } else
- schedule();
-
+ kcov_remote_stop();
+ cond_resched();
+ }
}
- unuse_mm(dev->mm);
- set_fs(oldfs);
+ kthread_unuse_mm(dev->mm);
+
return 0;
}
+static bool vhost_run_work_list(void *data)
+{
+ struct vhost_worker *worker = data;
+ struct vhost_work *work, *work_next;
+ struct llist_node *node;
+
+ node = llist_del_all(&worker->work_list);
+ if (node) {
+ __set_current_state(TASK_RUNNING);
+
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
+ kcov_remote_start_common(worker->kcov_handle);
+ work->fn(work);
+ kcov_remote_stop();
+ cond_resched();
+ }
+ }
+
+ return !!node;
+}
+
+static void vhost_worker_killed(void *data)
+{
+ struct vhost_worker *worker = data;
+ struct vhost_dev *dev = worker->dev;
+ struct vhost_virtqueue *vq;
+ int i, attach_cnt = 0;
+
+ mutex_lock(&worker->mutex);
+ worker->killed = true;
+
+ for (i = 0; i < dev->nvqs; i++) {
+ vq = dev->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ if (worker ==
+ rcu_dereference_check(vq->worker,
+ lockdep_is_held(&vq->mutex))) {
+ rcu_assign_pointer(vq->worker, NULL);
+ attach_cnt++;
+ }
+ mutex_unlock(&vq->mutex);
+ }
+
+ worker->attachment_cnt -= attach_cnt;
+ if (attach_cnt)
+ synchronize_rcu();
+ /*
+ * Finish vhost_worker_flush calls and any other works that snuck in
+ * before the synchronize_rcu.
+ */
+ vhost_run_work_list(worker);
+ mutex_unlock(&worker->mutex);
+}
+
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->indirect);
@@ -255,6 +502,8 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
vq->log = NULL;
kfree(vq->heads);
vq->heads = NULL;
+ kfree(vq->nheads);
+ vq->nheads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
@@ -265,11 +514,16 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
- vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
- GFP_KERNEL);
- vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
- vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
- if (!vq->indirect || !vq->log || !vq->heads)
+ vq->indirect = kmalloc_array(UIO_MAXIOV,
+ sizeof(*vq->indirect),
+ GFP_KERNEL);
+ vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
+ GFP_KERNEL);
+ vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
+ GFP_KERNEL);
+ vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads),
+ GFP_KERNEL);
+ if (!vq->indirect || !vq->log || !vq->heads || !vq->nheads)
goto err_nomem;
}
return 0;
@@ -288,8 +542,51 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
-long vhost_dev_init(struct vhost_dev *dev,
- struct vhost_virtqueue **vqs, int nvqs)
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+ int pkts, int total_len)
+{
+ struct vhost_dev *dev = vq->dev;
+
+ if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+ pkts >= dev->weight) {
+ vhost_poll_queue(&vq->poll);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
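
Handler loops call this after each buffer so one virtqueue cannot starve the shared worker: once the packet or byte budget is spent, the poll work is requeued and the loop exits. A sketch of the canonical shape, where process_one_buf() is a hypothetical stand-in for the backend's per-descriptor work:

/* Sketch: bounded service loop in the style of vhost-net. */
static void example_handle_vq(struct vhost_virtqueue *vq)
{
        int total_len = 0, pkts = 0, len;

        do {
                len = process_one_buf(vq);      /* hypothetical helper */
                if (len <= 0)
                        break;
                total_len += len;
        } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
}
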
+
+static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
+ unsigned int num)
+{
+ size_t event __maybe_unused =
+ vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+ return size_add(struct_size(vq->avail, ring, num), event);
+}
+
+static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
+ unsigned int num)
+{
+ size_t event __maybe_unused =
+ vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+ return size_add(struct_size(vq->used, ring, num), event);
+}
+
+static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
+ unsigned int num)
+{
+ return sizeof(*vq->desc) * num;
+}
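
As a concrete check of these helpers for a 256-entry split ring with VIRTIO_RING_F_EVENT_IDX negotiated (layouts per include/uapi/linux/virtio_ring.h):

/* num = 256, EVENT_IDX on:
 *   desc:  16 * 256                                  = 4096 bytes
 *   avail: 4 (flags+idx) + 2 * 256 + 2 (used_event)  =  518 bytes
 *   used:  4 (flags+idx) + 8 * 256 + 2 (avail_event) = 2054 bytes
 */
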
+
+void vhost_dev_init(struct vhost_dev *dev,
+ struct vhost_virtqueue **vqs, int nvqs,
+ int iov_limit, int weight, int byte_weight,
+ bool use_worker,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg))
{
struct vhost_virtqueue *vq;
int i;
@@ -298,27 +595,34 @@ long vhost_dev_init(struct vhost_dev *dev,
dev->nvqs = nvqs;
mutex_init(&dev->mutex);
dev->log_ctx = NULL;
- dev->log_file = NULL;
- dev->memory = NULL;
+ dev->umem = NULL;
+ dev->iotlb = NULL;
dev->mm = NULL;
- spin_lock_init(&dev->work_lock);
- INIT_LIST_HEAD(&dev->work_list);
- dev->worker = NULL;
+ dev->iov_limit = iov_limit;
+ dev->weight = weight;
+ dev->byte_weight = byte_weight;
+ dev->use_worker = use_worker;
+ dev->msg_handler = msg_handler;
+ dev->fork_owner = fork_from_owner_default;
+ init_waitqueue_head(&dev->wait);
+ INIT_LIST_HEAD(&dev->read_list);
+ INIT_LIST_HEAD(&dev->pending_list);
+ spin_lock_init(&dev->iotlb_lock);
+ xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->log = NULL;
vq->indirect = NULL;
vq->heads = NULL;
+ vq->nheads = NULL;
vq->dev = dev;
mutex_init(&vq->mutex);
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
- POLLIN, dev);
+ EPOLLIN, dev, vq);
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
@@ -344,14 +648,29 @@ static void vhost_attach_cgroups_work(struct vhost_work *work)
s->ret = cgroup_attach_task_all(s->owner, current);
}
-static int vhost_attach_cgroups(struct vhost_dev *dev)
+static int vhost_attach_task_to_cgroups(struct vhost_worker *worker)
{
struct vhost_attach_cgroups_struct attach;
+ int saved_cnt;
attach.owner = current;
+
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
- vhost_work_queue(dev, &attach.work);
- vhost_work_flush(dev, &attach.work);
+ vhost_worker_queue(worker, &attach.work);
+
+ mutex_lock(&worker->mutex);
+
+ /*
+ * Temporarily raise attachment_cnt to INT_MAX so that
+ * __vhost_worker_flush does not skip the flush.
+ */
+ saved_cnt = worker->attachment_cnt;
+ worker->attachment_cnt = INT_MAX;
+ __vhost_worker_flush(worker);
+ worker->attachment_cnt = saved_cnt;
+
+ mutex_unlock(&worker->mutex);
+
return attach.ret;
}
@@ -362,11 +681,423 @@ bool vhost_dev_has_owner(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
+static void vhost_attach_mm(struct vhost_dev *dev)
+{
+ /* No owner, become one */
+ if (dev->use_worker) {
+ dev->mm = get_task_mm(current);
+ } else {
+ /* vDPA devices do not use a worker thread, so there's
+ * no need to hold the address space. This also avoids a
+ * deadlock with mmap(), which may hold a refcount on the
+ * file and depend on the release method to remove the vma.
+ */
+ dev->mm = current->mm;
+ mmgrab(dev->mm);
+ }
+}
+
+static void vhost_detach_mm(struct vhost_dev *dev)
+{
+ if (!dev->mm)
+ return;
+
+ if (dev->use_worker)
+ mmput(dev->mm);
+ else
+ mmdrop(dev->mm);
+
+ dev->mm = NULL;
+}
+
+static void vhost_worker_destroy(struct vhost_dev *dev,
+ struct vhost_worker *worker)
+{
+ if (!worker)
+ return;
+
+ WARN_ON(!llist_empty(&worker->work_list));
+ xa_erase(&dev->worker_xa, worker->id);
+ worker->ops->stop(worker);
+ kfree(worker);
+}
+
+static void vhost_workers_free(struct vhost_dev *dev)
+{
+ struct vhost_worker *worker;
+ unsigned long i;
+
+ if (!dev->use_worker)
+ return;
+
+ for (i = 0; i < dev->nvqs; i++)
+ rcu_assign_pointer(dev->vqs[i]->worker, NULL);
+ /*
+ * Free the default worker we created and cleanup workers userspace
+ * created but couldn't clean up (it forgot or crashed).
+ */
+ xa_for_each(&dev->worker_xa, i, worker)
+ vhost_worker_destroy(dev, worker);
+ xa_destroy(&dev->worker_xa);
+}
+
+static void vhost_task_wakeup(struct vhost_worker *worker)
+{
+ return vhost_task_wake(worker->vtsk);
+}
+
+static void vhost_kthread_wakeup(struct vhost_worker *worker)
+{
+ wake_up_process(worker->kthread_task);
+}
+
+static void vhost_task_do_stop(struct vhost_worker *worker)
+{
+ return vhost_task_stop(worker->vtsk);
+}
+
+static void vhost_kthread_do_stop(struct vhost_worker *worker)
+{
+ kthread_stop(worker->kthread_task);
+}
+
+static int vhost_task_worker_create(struct vhost_worker *worker,
+ struct vhost_dev *dev, const char *name)
+{
+ struct vhost_task *vtsk;
+ u32 id;
+ int ret;
+
+ vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
+ worker, name);
+ if (IS_ERR(vtsk))
+ return PTR_ERR(vtsk);
+
+ worker->vtsk = vtsk;
+ vhost_task_start(vtsk);
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0) {
+ vhost_task_do_stop(worker);
+ return ret;
+ }
+ worker->id = id;
+ return 0;
+}
+
+static int vhost_kthread_worker_create(struct vhost_worker *worker,
+ struct vhost_dev *dev, const char *name)
+{
+ struct task_struct *task;
+ u32 id;
+ int ret;
+
+ task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ worker->kthread_task = task;
+ wake_up_process(task);
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0)
+ goto stop_worker;
+
+ ret = vhost_attach_task_to_cgroups(worker);
+ if (ret)
+ goto free_id;
+
+ worker->id = id;
+ return 0;
+
+free_id:
+ xa_erase(&dev->worker_xa, id);
+stop_worker:
+ vhost_kthread_do_stop(worker);
+ return ret;
+}
+
+static const struct vhost_worker_ops kthread_ops = {
+ .create = vhost_kthread_worker_create,
+ .stop = vhost_kthread_do_stop,
+ .wakeup = vhost_kthread_wakeup,
+};
+
+static const struct vhost_worker_ops vhost_task_ops = {
+ .create = vhost_task_worker_create,
+ .stop = vhost_task_do_stop,
+ .wakeup = vhost_task_wakeup,
+};
+
+static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+{
+ struct vhost_worker *worker;
+ char name[TASK_COMM_LEN];
+ int ret;
+ const struct vhost_worker_ops *ops = dev->fork_owner ? &vhost_task_ops :
+ &kthread_ops;
+
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
+ if (!worker)
+ return NULL;
+
+ worker->dev = dev;
+ worker->ops = ops;
+ snprintf(name, sizeof(name), "vhost-%d", current->pid);
+
+ mutex_init(&worker->mutex);
+ init_llist_head(&worker->work_list);
+ worker->kcov_handle = kcov_common_handle();
+ ret = ops->create(worker, dev, name);
+ if (ret < 0)
+ goto free_worker;
+
+ return worker;
+
+free_worker:
+ kfree(worker);
+ return NULL;
+}
+
+/* Caller must have device mutex */
+static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ struct vhost_worker *worker)
+{
+ struct vhost_worker *old_worker;
+
+ mutex_lock(&worker->mutex);
+ if (worker->killed) {
+ mutex_unlock(&worker->mutex);
+ return;
+ }
+
+ mutex_lock(&vq->mutex);
+
+ old_worker = rcu_dereference_check(vq->worker,
+ lockdep_is_held(&vq->mutex));
+ rcu_assign_pointer(vq->worker, worker);
+ worker->attachment_cnt++;
+
+ if (!old_worker) {
+ mutex_unlock(&vq->mutex);
+ mutex_unlock(&worker->mutex);
+ return;
+ }
+ mutex_unlock(&vq->mutex);
+ mutex_unlock(&worker->mutex);
+
+ /*
+ * Take the worker mutex to make sure we see the work queued from
+ * device-wide flushes, which don't use RCU for execution.
+ */
+ mutex_lock(&old_worker->mutex);
+ if (old_worker->killed) {
+ mutex_unlock(&old_worker->mutex);
+ return;
+ }
+
+ /*
+ * We don't want to call synchronize_rcu for every vq during setup
+ * because it will slow down VM startup. If we haven't done
+ * VHOST_SET_VRING_KICK and not done the driver specific
+ * SET_ENDPOINT/RUNNING then we can skip the sync since there will
+ * not be any works queued for scsi and net.
+ */
+ mutex_lock(&vq->mutex);
+ if (!vhost_vq_get_backend(vq) && !vq->kick) {
+ mutex_unlock(&vq->mutex);
+
+ old_worker->attachment_cnt--;
+ mutex_unlock(&old_worker->mutex);
+ /*
+ * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
+ * Warn if it adds support for multiple workers but forgets to
+ * handle the early queueing case.
+ */
+ WARN_ON(!old_worker->attachment_cnt &&
+ !llist_empty(&old_worker->work_list));
+ return;
+ }
+ mutex_unlock(&vq->mutex);
+
+ /* Make sure new vq queue/flush/poll calls see the new worker */
+ synchronize_rcu();
+ /* Make sure whatever was queued gets run */
+ __vhost_worker_flush(old_worker);
+ old_worker->attachment_cnt--;
+ mutex_unlock(&old_worker->mutex);
+}
+
+/* Caller must have device mutex */
+static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ struct vhost_vring_worker *info)
+{
+ unsigned long index = info->worker_id;
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_worker *worker;
+
+ if (!dev->use_worker)
+ return -EINVAL;
+
+ worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
+ if (!worker || worker->id != info->worker_id)
+ return -ENODEV;
+
+ __vhost_vq_attach_worker(vq, worker);
+ return 0;
+}
+
+/* Caller must have device mutex */
+static int vhost_new_worker(struct vhost_dev *dev,
+ struct vhost_worker_state *info)
+{
+ struct vhost_worker *worker;
+
+ worker = vhost_worker_create(dev);
+ if (!worker)
+ return -ENOMEM;
+
+ info->worker_id = worker->id;
+ return 0;
+}
+
+/* Caller must have device mutex */
+static int vhost_free_worker(struct vhost_dev *dev,
+ struct vhost_worker_state *info)
+{
+ unsigned long index = info->worker_id;
+ struct vhost_worker *worker;
+
+ worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
+ if (!worker || worker->id != info->worker_id)
+ return -ENODEV;
+
+ mutex_lock(&worker->mutex);
+ if (worker->attachment_cnt || worker->killed) {
+ mutex_unlock(&worker->mutex);
+ return -EBUSY;
+ }
+ /*
+ * A flush might have raced and snuck in before attachment_cnt was set
+ * to zero. Make sure flushes are flushed from the queue before
+ * freeing.
+ */
+ __vhost_worker_flush(worker);
+ mutex_unlock(&worker->mutex);
+
+ vhost_worker_destroy(dev, worker);
+ return 0;
+}
+
+static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
+ struct vhost_virtqueue **vq, u32 *id)
+{
+ u32 __user *idxp = argp;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, idxp);
+ if (r < 0)
+ return r;
+
+ if (idx >= dev->nvqs)
+ return -ENOBUFS;
+
+ idx = array_index_nospec(idx, dev->nvqs);
+
+ *vq = dev->vqs[idx];
+ *id = idx;
+ return 0;
+}
+
+/* Caller must have device mutex */
+long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
+ void __user *argp)
+{
+ struct vhost_vring_worker ring_worker;
+ struct vhost_worker_state state;
+ struct vhost_worker *worker;
+ struct vhost_virtqueue *vq;
+ long ret;
+ u32 idx;
+
+ if (!dev->use_worker)
+ return -EINVAL;
+
+ if (!vhost_dev_has_owner(dev))
+ return -EINVAL;
+
+ ret = vhost_dev_check_owner(dev);
+ if (ret)
+ return ret;
+
+ switch (ioctl) {
+ /* dev worker ioctls */
+ case VHOST_NEW_WORKER:
+ /*
+ * vhost_tasks will account for worker threads under the parent's
+ * NPROC value but kthreads do not. To avoid userspace overflowing
+ * the system with worker threads, fork_owner must be true.
+ */
+ if (!dev->fork_owner)
+ return -EFAULT;
+
+ ret = vhost_new_worker(dev, &state);
+ if (!ret && copy_to_user(argp, &state, sizeof(state)))
+ ret = -EFAULT;
+ return ret;
+ case VHOST_FREE_WORKER:
+ if (copy_from_user(&state, argp, sizeof(state)))
+ return -EFAULT;
+ return vhost_free_worker(dev, &state);
+ /* vring worker ioctls */
+ case VHOST_ATTACH_VRING_WORKER:
+ case VHOST_GET_VRING_WORKER:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
+ if (ret)
+ return ret;
+
+ switch (ioctl) {
+ case VHOST_ATTACH_VRING_WORKER:
+ if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = vhost_vq_attach_worker(vq, &ring_worker);
+ break;
+ case VHOST_GET_VRING_WORKER:
+ worker = rcu_dereference_check(vq->worker,
+ lockdep_is_held(&dev->mutex));
+ if (!worker) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ring_worker.index = idx;
+ ring_worker.worker_id = worker->id;
+
+ if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_worker_ioctl);
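
End to end, the worker ioctls let userspace shard a device's virtqueues across threads. A minimal sketch that gives ring 1 a private worker; it assumes VHOST_SET_OWNER has already been issued and that the device is in the default vhost_task mode (otherwise VHOST_NEW_WORKER fails, as above):

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Sketch: create a worker and attach it to vq 1. */
static int attach_private_worker(int vhost_fd)
{
        struct vhost_worker_state state = { 0 };
        struct vhost_vring_worker ring_worker = { 0 };

        if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state) < 0)
                return -1;

        ring_worker.index = 1;
        ring_worker.worker_id = state.worker_id;
        if (ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &ring_worker) < 0) {
                /* Workers with no rings attached can be reclaimed. */
                ioctl(vhost_fd, VHOST_FREE_WORKER, &state);
                return -1;
        }
        return 0;
}
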
+
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
- struct task_struct *worker;
- int err;
+ struct vhost_worker *worker;
+ int err, i;
/* Is there an owner already? */
if (vhost_dev_has_owner(dev)) {
@@ -374,52 +1105,66 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
goto err_mm;
}
- /* No owner, become one */
- dev->mm = get_task_mm(current);
- worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
- if (IS_ERR(worker)) {
- err = PTR_ERR(worker);
- goto err_worker;
- }
-
- dev->worker = worker;
- wake_up_process(worker); /* avoid contributing to loadavg */
-
- err = vhost_attach_cgroups(dev);
- if (err)
- goto err_cgroup;
+ vhost_attach_mm(dev);
err = vhost_dev_alloc_iovecs(dev);
if (err)
- goto err_cgroup;
+ goto err_iovecs;
+
+ if (dev->use_worker) {
+ /*
+ * This should be done last, because vsock can queue work
+ * before VHOST_SET_OWNER so it simplifies the failure path
+ * below since we don't have to worry about vsock queueing
+ * while we free the worker.
+ */
+ worker = vhost_worker_create(dev);
+ if (!worker) {
+ err = -ENOMEM;
+ goto err_worker;
+ }
+
+ for (i = 0; i < dev->nvqs; i++)
+ __vhost_vq_attach_worker(dev->vqs[i], worker);
+ }
return 0;
-err_cgroup:
- kthread_stop(worker);
- dev->worker = NULL;
+
err_worker:
- if (dev->mm)
- mmput(dev->mm);
- dev->mm = NULL;
+ vhost_dev_free_iovecs(dev);
+err_iovecs:
+ vhost_detach_mm(dev);
err_mm:
return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void)
+static struct vhost_iotlb *iotlb_alloc(void)
+{
+ return vhost_iotlb_alloc(max_iotlb_entries,
+ VHOST_IOTLB_FLAG_RETIRE);
+}
+
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
- return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+ return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
-void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
- vhost_dev_cleanup(dev, true);
+ int i;
+
+ vhost_dev_cleanup(dev);
- /* Restore memory to default empty mapping. */
- memory->nregions = 0;
- RCU_INIT_POINTER(dev->memory, memory);
+ dev->fork_owner = fork_from_owner_default;
+ dev->umem = umem;
+ /* We don't need VQ locks below since vhost_dev_cleanup makes sure
+ * VQs aren't running.
+ */
+ for (i = 0; i < dev->nvqs; ++i)
+ dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
@@ -428,243 +1173,988 @@ void vhost_dev_stop(struct vhost_dev *dev)
int i;
for (i = 0; i < dev->nvqs; ++i) {
- if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+ if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
vhost_poll_stop(&dev->vqs[i]->poll);
- vhost_poll_flush(&dev->vqs[i]->poll);
- }
}
+
+ vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
-/* Caller should have device mutex if and only if locked is set */
-void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
+void vhost_clear_msg(struct vhost_dev *dev)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&dev->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &dev->read_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ list_for_each_entry_safe(node, n, &dev->pending_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ spin_unlock(&dev->iotlb_lock);
+}
+EXPORT_SYMBOL_GPL(vhost_clear_msg);
+
+void vhost_dev_cleanup(struct vhost_dev *dev)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
if (dev->vqs[i]->error_ctx)
eventfd_ctx_put(dev->vqs[i]->error_ctx);
- if (dev->vqs[i]->error)
- fput(dev->vqs[i]->error);
if (dev->vqs[i]->kick)
fput(dev->vqs[i]->kick);
- if (dev->vqs[i]->call_ctx)
- eventfd_ctx_put(dev->vqs[i]->call_ctx);
- if (dev->vqs[i]->call)
- fput(dev->vqs[i]->call);
+ if (dev->vqs[i]->call_ctx.ctx)
+ eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
vhost_vq_reset(dev, dev->vqs[i]);
}
vhost_dev_free_iovecs(dev);
if (dev->log_ctx)
eventfd_ctx_put(dev->log_ctx);
dev->log_ctx = NULL;
- if (dev->log_file)
- fput(dev->log_file);
- dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(rcu_dereference_protected(dev->memory,
- locked ==
- lockdep_is_held(&dev->mutex)));
- RCU_INIT_POINTER(dev->memory, NULL);
- WARN_ON(!list_empty(&dev->work_list));
- if (dev->worker) {
- kthread_stop(dev->worker);
- dev->worker = NULL;
- }
- if (dev->mm)
- mmput(dev->mm);
- dev->mm = NULL;
+ vhost_iotlb_free(dev->umem);
+ dev->umem = NULL;
+ vhost_iotlb_free(dev->iotlb);
+ dev->iotlb = NULL;
+ vhost_clear_msg(dev);
+ wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
+ vhost_workers_free(dev);
+ vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
-static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
u64 a = addr / VHOST_PAGE_SIZE / 8;
/* Make sure 64 bit math will not overflow. */
if (a > ULONG_MAX - (unsigned long)log_base ||
a + (unsigned long)log_base > ULONG_MAX)
- return 0;
+ return false;
- return access_ok(VERIFY_WRITE, log_base + a,
+ return access_ok(log_base + a,
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
+/* Make sure 64 bit math will not overflow. */
+static bool vhost_overflow(u64 uaddr, u64 size)
+{
+ if (uaddr > ULONG_MAX || size > ULONG_MAX)
+ return true;
+
+ if (!size)
+ return false;
+
+ return uaddr > ULONG_MAX - size + 1;
+}
+
/* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
- int log_all)
+static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
+ int log_all)
{
- int i;
+ struct vhost_iotlb_map *map;
- if (!mem)
- return 0;
+ if (!umem)
+ return false;
+
+ list_for_each_entry(map, &umem->list, link) {
+ unsigned long a = map->addr;
+
+ if (vhost_overflow(map->addr, map->size))
+ return false;
- for (i = 0; i < mem->nregions; ++i) {
- struct vhost_memory_region *m = mem->regions + i;
- unsigned long a = m->userspace_addr;
- if (m->memory_size > ULONG_MAX)
- return 0;
- else if (!access_ok(VERIFY_WRITE, (void __user *)a,
- m->memory_size))
- return 0;
+
+ if (!access_ok((void __user *)a, map->size))
+ return false;
else if (log_all && !log_access_ok(log_base,
- m->guest_phys_addr,
- m->memory_size))
- return 0;
+ map->start,
+ map->size))
+ return false;
}
- return 1;
+ return true;
+}
+
+static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
+ u64 addr, unsigned int size,
+ int type)
+{
+ const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
+
+ if (!map)
+ return NULL;
+
+ return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
- int log_all)
+static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
+ int log_all)
{
int i;
for (i = 0; i < d->nvqs; ++i) {
- int ok;
+ bool ok;
+ bool log;
+
mutex_lock(&d->vqs[i]->mutex);
+ log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
/* If ring is inactive, will check when it's enabled. */
if (d->vqs[i]->private_data)
- ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
- log_all);
+ ok = vq_memory_access_ok(d->vqs[i]->log_base,
+ umem, log);
else
- ok = 1;
+ ok = true;
mutex_unlock(&d->vqs[i]->mutex);
if (!ok)
- return 0;
+ return false;
+ }
+ return true;
+}
+
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct iovec iov[], int iov_size, int access);
+
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
+ const void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_to_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that all vq
+ * memory can be accessed through the iotlb. So
+ * -EAGAIN should not happen in this case.
+ */
+ struct iov_iter t;
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)to, size,
+ VHOST_ADDR_USED);
+
+ if (uaddr)
+ return __copy_to_user(uaddr, from, size);
+
+ ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_WO);
+ if (ret < 0)
+ goto out;
+ iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
+ ret = copy_to_iter(from, size, &t);
+ if (ret == size)
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
+ void __user *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_from_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that the vq
+ * memory can be accessed through the iotlb. So
+ * -EAGAIN should not happen in this case.
+ */
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)from, size,
+ VHOST_ADDR_DESC);
+ struct iov_iter f;
+
+ if (uaddr)
+ return __copy_from_user(to, uaddr, size);
+
+ ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", from,
+ (unsigned long long) size);
+ goto out;
+ }
+ iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
+ ret = copy_from_iter(to, size, &f);
+ if (ret == size)
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
+ void __user *addr, unsigned int size,
+ int type)
+{
+ int ret;
+
+ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
+ vq_err(vq, "Non atomic userspace memory access: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
}
+
+ return vq->iotlb_iov[0].iov_base;
+}
+
+/* This function should be called after iotlb
+ * prefetch, which means we're sure that the vq
+ * memory can be accessed through the iotlb. So
+ * -EAGAIN should not happen in this case.
+ */
+static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
+ void __user *addr, unsigned int size,
+ int type)
+{
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)addr, size, type);
+ if (uaddr)
+ return uaddr;
+
+ return __vhost_get_user_slow(vq, addr, size, type);
+}
+
+#define vhost_put_user(vq, x, ptr) \
+({ \
+ int ret; \
+ if (!vq->iotlb) { \
+ ret = __put_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) to = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
+ sizeof(*ptr), VHOST_ADDR_USED); \
+ if (to != NULL) \
+ ret = __put_user(x, to); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
+{
+ return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+ vhost_avail_event(vq));
+}
+
+static inline int vhost_put_used(struct vhost_virtqueue *vq,
+ struct vring_used_elem *head, int idx,
+ int count)
+{
+ return vhost_copy_to_user(vq, vq->used->ring + idx, head,
+ count * sizeof(*head));
+}
+
+static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
+{
+ return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
+ &vq->used->flags);
+}
+
+static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
+{
+ return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
+ &vq->used->idx);
+}
+
+#define vhost_get_user(vq, x, ptr, type) \
+({ \
+ int ret; \
+ if (!vq->iotlb) { \
+ ret = __get_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) from = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
+ sizeof(*ptr), \
+ type); \
+ if (from != NULL) \
+ ret = __get_user(x, from); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+#define vhost_get_avail(vq, x, ptr) \
+ vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
+
+#define vhost_get_used(vq, x, ptr) \
+ vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
+
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_unlock(&d->vqs[i]->mutex);
+}
+
+static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
+{
+ __virtio16 idx;
+ int r;
+
+ r = vhost_get_avail(vq, idx, &vq->avail->idx);
+ if (unlikely(r < 0)) {
+ vq_err(vq, "Failed to access available index at %p (%d)\n",
+ &vq->avail->idx, r);
+ return r;
+ }
+
+ /* Check it isn't doing very strange things with available indexes */
+ vq->avail_idx = vhost16_to_cpu(vq, idx);
+ if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) {
+ vq_err(vq, "Invalid available index change from %u to %u",
+ vq->last_avail_idx, vq->avail_idx);
+ return -EINVAL;
+ }
+
+ /* We're done if there is nothing new */
+ if (vq->avail_idx == vq->last_avail_idx)
+ return 0;
+
+ /*
+ * We updated vq->avail_idx so we need a memory barrier between
+ * the index read above and the caller reading avail ring entries.
+ */
+ smp_rmb();
return 1;
}
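
The smp_rmb() above pairs with the write barrier the guest driver issues between filling avail ring entries and publishing the new avail->idx. Schematically (guest side simplified from the virtio driver):

/* Guest (virtio driver)         Host (vhost, above)
 *   avail->ring[i] = head;        read avail->idx
 *   virtio_wmb();                 smp_rmb();
 *   avail->idx = new_idx;         read avail->ring[i]
 */
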
-static int vq_access_ok(struct vhost_dev *d, unsigned int num,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used)
+static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
+ __virtio16 *head, int idx)
+{
+ return vhost_get_avail(vq, *head,
+ &vq->avail->ring[idx & (vq->num - 1)]);
+}
+
+static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
+ __virtio16 *flags)
+{
+ return vhost_get_avail(vq, *flags, &vq->avail->flags);
+}
+
+static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
+ __virtio16 *event)
+{
+ return vhost_get_avail(vq, *event, vhost_used_event(vq));
+}
+
+static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
+ __virtio16 *idx)
+{
+ return vhost_get_used(vq, *idx, &vq->used->idx);
+}
+
+static inline int vhost_get_desc(struct vhost_virtqueue *vq,
+ struct vring_desc *desc, int idx)
+{
+ return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
+}
+
+static void vhost_iotlb_notify_vq(struct vhost_dev *d,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&d->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &d->pending_list, node) {
+ struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+ if (msg->iova <= vq_msg->iova &&
+ msg->iova + msg->size - 1 >= vq_msg->iova &&
+ vq_msg->type == VHOST_IOTLB_MISS) {
+ vhost_poll_queue(&node->vq->poll);
+ list_del(&node->node);
+ kfree(node);
+ }
+ }
+
+ spin_unlock(&d->iotlb_lock);
+}
+
+static bool umem_access_ok(u64 uaddr, u64 size, int access)
+{
+ unsigned long a = uaddr;
+
+ /* Make sure 64 bit math will not overflow. */
+ if (vhost_overflow(uaddr, size))
+ return false;
+
+ if ((access & VHOST_ACCESS_RO) &&
+ !access_ok((void __user *)a, size))
+ return false;
+ if ((access & VHOST_ACCESS_WO) &&
+ !access_ok((void __user *)a, size))
+ return false;
+ return true;
+}
+
+static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg)
+{
+ int ret = 0;
+
+ if (asid != 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+ vhost_dev_lock_vqs(dev);
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+ ret = -EFAULT;
+ break;
+ }
+ vhost_vq_meta_reset(dev);
+ if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1,
+ msg->uaddr, msg->perm)) {
+ ret = -ENOMEM;
+ break;
+ }
+ vhost_iotlb_notify_vq(dev, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ vhost_vq_meta_reset(dev);
+ vhost_iotlb_del_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ vhost_dev_unlock_vqs(dev);
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from)
+{
+ struct vhost_iotlb_msg msg;
+ size_t offset;
+ int type, ret;
+ u32 asid = 0;
+
+ ret = copy_from_iter(&type, sizeof(type), from);
+ if (ret != sizeof(type)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (type) {
+ case VHOST_IOTLB_MSG:
+ /* There may be a hole after the type field for the V1
+ * message format, so skip it here.
+ */
+ offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ if (vhost_backend_has_feature(dev->vqs[0],
+ VHOST_BACKEND_F_IOTLB_ASID)) {
+ ret = copy_from_iter(&asid, sizeof(asid), from);
+ if (ret != sizeof(asid)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ offset = 0;
+ } else
+ offset = sizeof(__u32);
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+
+ iov_iter_advance(from, offset);
+ ret = copy_from_iter(&msg, sizeof(msg), from);
+ if (ret != sizeof(msg)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (dev->msg_handler)
+ ret = dev->msg_handler(dev, asid, &msg);
+ else
+ ret = vhost_process_iotlb_msg(dev, asid, &msg);
+ if (ret) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
+ sizeof(struct vhost_msg_v2);
+done:
+ return ret;
+}
+EXPORT_SYMBOL(vhost_chr_write_iter);
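
On the userspace side, this write path is how a VMM (typically QEMU) feeds translations back after a VHOST_IOTLB_MISS. A sketch of pushing one VHOST_IOTLB_UPDATE in the V2 format; it assumes headers recent enough that vhost_msg_v2 carries an asid field, and targets asid 0:

#include <string.h>
#include <unistd.h>
#include <linux/vhost.h>

/* Sketch: install one IOVA -> user-VA mapping. */
static int iotlb_update(int vhost_fd, __u64 iova, __u64 size, __u64 uaddr)
{
        struct vhost_msg_v2 msg;

        memset(&msg, 0, sizeof(msg));
        msg.type = VHOST_IOTLB_MSG_V2;
        msg.asid = 0;
        msg.iotlb.iova = iova;
        msg.iotlb.size = size;
        msg.iotlb.uaddr = uaddr;
        msg.iotlb.perm = VHOST_ACCESS_RW;
        msg.iotlb.type = VHOST_IOTLB_UPDATE;

        if (write(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
                return -1;
        return 0;
}
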
+
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait)
+{
+ __poll_t mask = 0;
+
+ poll_wait(file, &dev->wait, wait);
+
+ if (!list_empty(&dev->read_list))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+}
+EXPORT_SYMBOL(vhost_chr_poll);
+
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock)
+{
+ DEFINE_WAIT(wait);
+ struct vhost_msg_node *node;
+ ssize_t ret = 0;
+ unsigned size = sizeof(struct vhost_msg);
+
+ if (iov_iter_count(to) < size)
+ return 0;
+
+ while (1) {
+ if (!noblock)
+ prepare_to_wait(&dev->wait, &wait,
+ TASK_INTERRUPTIBLE);
+
+ node = vhost_dequeue_msg(dev, &dev->read_list);
+ if (node)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ if (!dev->iotlb) {
+ ret = -EBADFD;
+ break;
+ }
+
+ schedule();
+ }
+
+ if (!noblock)
+ finish_wait(&dev->wait, &wait);
+
+ if (node) {
+ struct vhost_iotlb_msg *msg;
+ void *start = &node->msg;
+
+ switch (node->msg.type) {
+ case VHOST_IOTLB_MSG:
+ size = sizeof(node->msg);
+ msg = &node->msg.iotlb;
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ size = sizeof(node->msg_v2);
+ msg = &node->msg_v2.iotlb;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ ret = copy_to_iter(start, size, to);
+ if (ret != size || msg->type != VHOST_IOTLB_MISS) {
+ kfree(node);
+ return ret;
+ }
+ vhost_enqueue_msg(dev, &dev->pending_list, node);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
+
+static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
+{
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_msg_node *node;
+ struct vhost_iotlb_msg *msg;
+ bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
+
+ node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
+ if (!node)
+ return -ENOMEM;
+
+ if (v2) {
+ node->msg_v2.type = VHOST_IOTLB_MSG_V2;
+ msg = &node->msg_v2.iotlb;
+ } else {
+ msg = &node->msg.iotlb;
+ }
+
+ msg->type = VHOST_IOTLB_MISS;
+ msg->iova = iova;
+ msg->perm = access;
+
+ vhost_enqueue_msg(dev, &dev->read_list, node);
+
+ return 0;
+}
+
+static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used)
+{
+ /* If an IOTLB device is present, the vring addresses are
+ * GIOVAs. Access validation occurs at prefetch time. */
+ if (vq->iotlb)
+ return true;
+
+ return access_ok(desc, vhost_get_desc_size(vq, num)) &&
+ access_ok(avail, vhost_get_avail_size(vq, num)) &&
+ access_ok(used, vhost_get_used_size(vq, num));
+}
+
+static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
+ const struct vhost_iotlb_map *map,
+ int type)
{
- size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
- return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
- access_ok(VERIFY_READ, avail,
- sizeof *avail + num * sizeof *avail->ring + s) &&
- access_ok(VERIFY_WRITE, used,
- sizeof *used + num * sizeof *used->ring + s);
+ int access = (type == VHOST_ADDR_USED) ?
+ VHOST_ACCESS_WO : VHOST_ACCESS_RO;
+
+ if (likely(map->perm & access))
+ vq->meta_iotlb[type] = map;
}
+static bool iotlb_access_ok(struct vhost_virtqueue *vq,
+ int access, u64 addr, u64 len, int type)
+{
+ const struct vhost_iotlb_map *map;
+ struct vhost_iotlb *umem = vq->iotlb;
+ u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
+
+ if (vhost_vq_meta_fetch(vq, addr, len, type))
+ return true;
+
+ while (len > s) {
+ map = vhost_iotlb_itree_first(umem, addr, last);
+ if (map == NULL || map->start > addr) {
+ vhost_iotlb_miss(vq, addr, access);
+ return false;
+ } else if (!(map->perm & access)) {
+ /* Report the possible access violation by
+ * requesting another translation from userspace.
+ */
+ return false;
+ }
+
+ size = map->size - addr + map->start;
+
+ if (orig_addr == addr && size >= len)
+ vhost_vq_meta_update(vq, map, type);
+
+ s += size;
+ addr += size;
+ }
+
+ return true;
+}
+
+int vq_meta_prefetch(struct vhost_virtqueue *vq)
+{
+ unsigned int num = vq->num;
+
+ if (!vq->iotlb)
+ return 1;
+
+ return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
+ vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
+ iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
+ vhost_get_avail_size(vq, num),
+ VHOST_ADDR_AVAIL) &&
+ iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
+ vhost_get_used_size(vq, num), VHOST_ADDR_USED);
+}
+EXPORT_SYMBOL_GPL(vq_meta_prefetch);
+
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
-int vhost_log_access_ok(struct vhost_dev *dev)
+bool vhost_log_access_ok(struct vhost_dev *dev)
{
- struct vhost_memory *mp;
-
- mp = rcu_dereference_protected(dev->memory,
- lockdep_is_held(&dev->mutex));
- return memory_access_ok(dev, mp, 1);
+ return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
+static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
+ void __user *log_base,
+ bool log_used,
+ u64 log_addr)
+{
+ /* If an IOTLB device is present, log_addr is a GIOVA that
+ * will never be logged by log_used(). */
+ if (vq->iotlb)
+ return true;
+
+ return !log_used || log_access_ok(log_base, log_addr,
+ vhost_get_used_size(vq, vq->num));
+}
+
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
- void __user *log_base)
+static bool vq_log_access_ok(struct vhost_virtqueue *vq,
+ void __user *log_base)
{
- struct vhost_memory *mp;
- size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
-
- mp = rcu_dereference_protected(vq->dev->memory,
- lockdep_is_held(&vq->mutex));
- return vq_memory_access_ok(log_base, mp,
- vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
- (!vq->log_used || log_access_ok(log_base, vq->log_addr,
- sizeof *vq->used +
- vq->num * sizeof *vq->used->ring + s));
+ return vq_memory_access_ok(log_base, vq->umem,
+ vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
+ vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
-int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq->dev, vq, vq->log_base);
+ if (!vq_log_access_ok(vq, vq->log_base))
+ return false;
+
+ return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
- struct vhost_memory mem, *newmem, *oldmem;
+ struct vhost_memory mem, *newmem;
+ struct vhost_memory_region *region;
+ struct vhost_iotlb *newumem, *oldumem;
unsigned long size = offsetof(struct vhost_memory, regions);
+ int i;
if (copy_from_user(&mem, m, size))
return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
- if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+ if (mem.nregions > max_mem_regions)
return -E2BIG;
- newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+ newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
+ GFP_KERNEL);
if (!newmem)
return -ENOMEM;
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
- mem.nregions * sizeof *m->regions)) {
- kfree(newmem);
+ flex_array_size(newmem, regions, mem.nregions))) {
+ kvfree(newmem);
return -EFAULT;
}
- if (!memory_access_ok(d, newmem,
- vhost_has_feature(d, VHOST_F_LOG_ALL))) {
- kfree(newmem);
+ newumem = iotlb_alloc();
+ if (!newumem) {
+ kvfree(newmem);
+ return -ENOMEM;
+ }
+
+ for (region = newmem->regions;
+ region < newmem->regions + mem.nregions;
+ region++) {
+ if (vhost_iotlb_add_range(newumem,
+ region->guest_phys_addr,
+ region->guest_phys_addr +
+ region->memory_size - 1,
+ region->userspace_addr,
+ VHOST_MAP_RW))
+ goto err;
+ }
+
+ if (!memory_access_ok(d, newumem, 0))
+ goto err;
+
+ oldumem = d->umem;
+ d->umem = newumem;
+
+ /* All memory accesses are done under some VQ mutex. */
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->umem = newumem;
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
+
+ kvfree(newmem);
+ vhost_iotlb_free(oldumem);
+ return 0;
+
+err:
+ vhost_iotlb_free(newumem);
+ kvfree(newmem);
+ return -EFAULT;
+}
+
+static long vhost_vring_set_num(struct vhost_dev *d,
+ struct vhost_virtqueue *vq,
+ void __user *argp)
+{
+ struct vhost_vring_state s;
+
+ /* Resizing ring with an active backend?
+ * You don't want to do that. */
+ if (vq->private_data)
+ return -EBUSY;
+
+ if (copy_from_user(&s, argp, sizeof s))
+ return -EFAULT;
+
+ if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
+ return -EINVAL;
+ vq->num = s.num;
+
+ return 0;
+}
+
+static long vhost_vring_set_addr(struct vhost_dev *d,
+ struct vhost_virtqueue *vq,
+ void __user *argp)
+{
+ struct vhost_vring_addr a;
+
+ if (copy_from_user(&a, argp, sizeof a))
return -EFAULT;
+ if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
+ return -EOPNOTSUPP;
+
+ /* For 32bit, verify that the top 32bits of the user
+ data are set to zero. */
+ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
+ (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
+ (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
+ return -EFAULT;
+
+ /* Make sure it's safe to cast pointers to vring types. */
+ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+ BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+ if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+ (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+ (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
+ return -EINVAL;
+
+ /* We only verify access here if backend is configured.
+ * If it is not, we don't as size might not have been setup.
+ * We will verify when backend is configured. */
+ if (vq->private_data) {
+ if (!vq_access_ok(vq, vq->num,
+ (void __user *)(unsigned long)a.desc_user_addr,
+ (void __user *)(unsigned long)a.avail_user_addr,
+ (void __user *)(unsigned long)a.used_user_addr))
+ return -EINVAL;
+
+ /* Also validate log access for used ring if enabled. */
+ if (!vq_log_used_access_ok(vq, vq->log_base,
+ a.flags & (0x1 << VHOST_VRING_F_LOG),
+ a.log_guest_addr))
+ return -EINVAL;
}
- oldmem = rcu_dereference_protected(d->memory,
- lockdep_is_held(&d->mutex));
- rcu_assign_pointer(d->memory, newmem);
- synchronize_rcu();
- kfree(oldmem);
+
+ vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
+ vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
+ vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
+ vq->log_addr = a.log_guest_addr;
+ vq->used = (void __user *)(unsigned long)a.used_user_addr;
+
return 0;
}
-long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
+static long vhost_vring_set_num_addr(struct vhost_dev *d,
+ struct vhost_virtqueue *vq,
+ unsigned int ioctl,
+ void __user *argp)
+{
+ long r;
+
+ mutex_lock(&vq->mutex);
+
+ switch (ioctl) {
+ case VHOST_SET_VRING_NUM:
+ r = vhost_vring_set_num(d, vq, argp);
+ break;
+ case VHOST_SET_VRING_ADDR:
+ r = vhost_vring_set_addr(d, vq, argp);
+ break;
+ default:
+ BUG();
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ return r;
+}
+
+long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
struct file *eventfp, *filep = NULL;
bool pollstart = false, pollstop = false;
struct eventfd_ctx *ctx = NULL;
- u32 __user *idxp = argp;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
struct vhost_vring_file f;
- struct vhost_vring_addr a;
u32 idx;
long r;
- r = get_user(idx, idxp);
+ r = vhost_get_vq_from_user(d, argp, &vq, &idx);
if (r < 0)
return r;
- if (idx >= d->nvqs)
- return -ENOBUFS;
- vq = d->vqs[idx];
+ if (ioctl == VHOST_SET_VRING_NUM ||
+ ioctl == VHOST_SET_VRING_ADDR) {
+ return vhost_vring_set_num_addr(d, vq, ioctl, argp);
+ }
mutex_lock(&vq->mutex);
switch (ioctl) {
- case VHOST_SET_VRING_NUM:
- /* Resizing ring with an active backend?
- * You don't want to do that. */
- if (vq->private_data) {
- r = -EBUSY;
- break;
- }
- if (copy_from_user(&s, argp, sizeof s)) {
- r = -EFAULT;
- break;
- }
- if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
- r = -EINVAL;
- break;
- }
- vq->num = s.num;
- break;
case VHOST_SET_VRING_BASE:
/* Moving base with an active backend?
* You don't want to do that. */
@@ -676,78 +2166,35 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EFAULT;
break;
}
- if (s.num > 0xffff) {
- r = -EINVAL;
- break;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->next_avail_head = vq->last_avail_idx =
+ s.num & 0xffff;
+ vq->last_used_idx = (s.num >> 16) & 0xffff;
+ } else {
+ if (s.num > 0xffff) {
+ r = -EINVAL;
+ break;
+ }
+ vq->next_avail_head = vq->last_avail_idx = s.num;
}
- vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
- s.num = vq->last_avail_idx;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
+ s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
+ else
+ s.num = vq->last_avail_idx;
if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;
break;
- case VHOST_SET_VRING_ADDR:
- if (copy_from_user(&a, argp, sizeof a)) {
- r = -EFAULT;
- break;
- }
- if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
- r = -EOPNOTSUPP;
- break;
- }
- /* For 32bit, verify that the top 32bits of the user
- data are set to zero. */
- if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
- (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
- (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
- r = -EFAULT;
- break;
- }
- if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
- (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
- (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
- r = -EINVAL;
- break;
- }
-
- /* We only verify access here if backend is configured.
- * If it is not, we don't as size might not have been setup.
- * We will verify when backend is configured. */
- if (vq->private_data) {
- if (!vq_access_ok(d, vq->num,
- (void __user *)(unsigned long)a.desc_user_addr,
- (void __user *)(unsigned long)a.avail_user_addr,
- (void __user *)(unsigned long)a.used_user_addr)) {
- r = -EINVAL;
- break;
- }
-
- /* Also validate log access for used ring if enabled. */
- if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
- !log_access_ok(vq->log_base, a.log_guest_addr,
- sizeof *vq->used +
- vq->num * sizeof *vq->used->ring)) {
- r = -EINVAL;
- break;
- }
- }
-
- vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
- vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
- vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
- vq->log_addr = a.log_guest_addr;
- vq->used = (void __user *)(unsigned long)a.used_user_addr;
- break;
case VHOST_SET_VRING_KICK:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
- eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
@@ -763,38 +2210,44 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EFAULT;
break;
}
- eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
- if (IS_ERR(eventfp)) {
- r = PTR_ERR(eventfp);
+ ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
break;
}
- if (eventfp != vq->call) {
- filep = vq->call;
- ctx = vq->call_ctx;
- vq->call = eventfp;
- vq->call_ctx = eventfp ?
- eventfd_ctx_fileget(eventfp) : NULL;
- } else
- filep = eventfp;
+
+ swap(ctx, vq->call_ctx.ctx);
break;
case VHOST_SET_VRING_ERR:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
- eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
- if (IS_ERR(eventfp)) {
- r = PTR_ERR(eventfp);
+ ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
break;
}
- if (eventfp != vq->error) {
- filep = vq->error;
- vq->error = eventfp;
- ctx = vq->error_ctx;
- vq->error_ctx = eventfp ?
- eventfd_ctx_fileget(eventfp) : NULL;
- } else
- filep = eventfp;
+ swap(ctx, vq->error_ctx);
+ break;
+ case VHOST_SET_VRING_ENDIAN:
+ r = vhost_set_vring_endian(vq, argp);
+ break;
+ case VHOST_GET_VRING_ENDIAN:
+ r = vhost_get_vring_endian(vq, idx, argp);
+ break;
+ case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
+ if (copy_from_user(&s, argp, sizeof(s))) {
+ r = -EFAULT;
+ break;
+ }
+ vq->busyloop_timeout = s.num;
+ break;
+ case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
+ s.index = idx;
+ s.num = vq->busyloop_timeout;
+ if (copy_to_user(argp, &s, sizeof(s)))
+ r = -EFAULT;
break;
default:
r = -ENOIOCTLCMD;
@@ -803,7 +2256,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
if (pollstop && vq->handle_kick)
vhost_poll_stop(&vq->poll);
- if (ctx)
+ if (!IS_ERR_OR_NULL(ctx))
eventfd_ctx_put(ctx);
if (filep)
fput(filep);
@@ -814,16 +2267,42 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
mutex_unlock(&vq->mutex);
if (pollstop && vq->handle_kick)
- vhost_poll_flush(&vq->poll);
+ vhost_dev_flush(vq->poll.dev);
return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
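For packed virtqueues, the VHOST_SET_VRING_BASE/VHOST_GET_VRING_BASE hunks above multiplex both ring indices into the single 32-bit vhost_vring_state.num field. A minimal standalone sketch of that encoding (plain userspace C, values illustrative):

#include <stdint.h>
#include <stdio.h>

/* Low 16 bits: last_avail_idx (whose top bit doubles as the avail wrap
 * counter); high 16 bits: last_used_idx (top bit is the used wrap
 * counter). */
static uint32_t pack_base(uint16_t last_avail_idx, uint16_t last_used_idx)
{
	return (uint32_t)last_avail_idx | ((uint32_t)last_used_idx << 16);
}

static void unpack_base(uint32_t num, uint16_t *avail, uint16_t *used)
{
	*avail = num & 0xffff;
	*used = (num >> 16) & 0xffff;
}

int main(void)
{
	uint16_t avail, used;

	unpack_base(pack_base(0x8005, 0x0003), &avail, &used);
	printf("last_avail %#x last_used %#x\n", avail, used);
	return 0;
}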
+int vhost_init_device_iotlb(struct vhost_dev *d)
+{
+ struct vhost_iotlb *niotlb, *oiotlb;
+ int i;
+
+ niotlb = iotlb_alloc();
+ if (!niotlb)
+ return -ENOMEM;
+
+ oiotlb = d->iotlb;
+ d->iotlb = niotlb;
+
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->iotlb = niotlb;
+ __vhost_vq_meta_reset(vq);
+ mutex_unlock(&vq->mutex);
+ }
+
+ vhost_iotlb_free(oiotlb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
+
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
- struct file *eventfp, *filep = NULL;
- struct eventfd_ctx *ctx = NULL;
+ struct eventfd_ctx *ctx;
u64 p;
long r;
int i, fd;
@@ -834,6 +2313,45 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
goto done;
}
+#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
+ if (ioctl == VHOST_SET_FORK_FROM_OWNER) {
+ /* Only allow modification before owner is set */
+ if (vhost_dev_has_owner(d)) {
+ r = -EBUSY;
+ goto done;
+ }
+ u8 fork_owner_val;
+
+ if (get_user(fork_owner_val, (u8 __user *)argp)) {
+ r = -EFAULT;
+ goto done;
+ }
+ if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
+ fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
+ r = -EINVAL;
+ goto done;
+ }
+ d->fork_owner = !!fork_owner_val;
+ r = 0;
+ goto done;
+ }
+ if (ioctl == VHOST_GET_FORK_FROM_OWNER) {
+ u8 fork_owner_val = d->fork_owner;
+
+ if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
+ fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
+ r = -EINVAL;
+ goto done;
+ }
+ if (put_user(fork_owner_val, (u8 __user *)argp)) {
+ r = -EFAULT;
+ goto done;
+ }
+ r = 0;
+ goto done;
+ }
+#endif
+
/* You must be the owner to do anything else */
r = vhost_dev_check_owner(d);
if (r)
@@ -858,7 +2376,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
vq = d->vqs[i];
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
- if (vq->private_data && !vq_log_access_ok(d, vq, base))
+ if (vq->private_data && !vq_log_access_ok(vq, base))
r = -EFAULT;
else
vq->log_base = base;
@@ -869,18 +2387,12 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
r = get_user(fd, (int __user *)argp);
if (r < 0)
break;
- eventfp = fd == -1 ? NULL : eventfd_fget(fd);
- if (IS_ERR(eventfp)) {
- r = PTR_ERR(eventfp);
+ ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
break;
}
- if (eventfp != d->log_file) {
- filep = d->log_file;
- ctx = d->log_ctx;
- d->log_ctx = eventfp ?
- eventfd_ctx_fileget(eventfp) : NULL;
- } else
- filep = eventfp;
+ swap(ctx, d->log_ctx);
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
d->vqs[i]->log_ctx = d->log_ctx;
@@ -888,8 +2400,6 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
}
if (ctx)
eventfd_ctx_put(ctx);
- if (filep)
- fput(filep);
break;
default:
r = -ENOIOCTLCMD;
@@ -900,26 +2410,9 @@ done:
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
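VHOST_SET_VRING_CALL/ERR above and VHOST_SET_LOG_FD here all follow the same rebind idiom: fetch the new eventfd context, swap() it with the installed pointer under the lock, and put the displaced reference afterwards (which is why vhost_vring_ioctl now checks !IS_ERR_OR_NULL(ctx) on the way out). A standalone model of the idiom, with a toy type standing in for struct eventfd_ctx:

#include <stdio.h>

struct ectx { int id; };

static void eventfd_ctx_put(struct ectx *c)
{
	if (c)
		printf("released ctx %d\n", c->id);
}

#define swap(a, b) do { typeof(a) t = (a); (a) = (b); (b) = t; } while (0)

int main(void)
{
	static struct ectx old = { 1 }, new = { 2 };
	struct ectx *installed = &old;	/* e.g. vq->call_ctx.ctx */
	struct ectx *ctx = &new;	/* from eventfd_ctx_fdget() */

	swap(ctx, installed);		/* new context goes live */
	eventfd_ctx_put(ctx);		/* old one is released afterwards */
	printf("live ctx %d\n", installed->id);
	return 0;
}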
-static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
- __u64 addr, __u32 len)
-{
- struct vhost_memory_region *reg;
- int i;
-
- /* linear search is not brilliant, but we really have on the order of 6
- * regions in practice */
- for (i = 0; i < mem->nregions; ++i) {
- reg = mem->regions + i;
- if (reg->guest_phys_addr <= addr &&
- reg->guest_phys_addr + reg->memory_size - 1 >= addr)
- return reg;
- }
- return NULL;
-}
-
/* TODO: This is really inefficient. We need something like get_user()
* (instruction directly accesses the data, with an exception table entry
- * returning -EFAULT). See Documentation/x86/exception-tables.txt.
+ * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
*/
static int set_bit_to_user(int nr, void __user *addr)
{
@@ -929,15 +2422,14 @@ static int set_bit_to_user(int nr, void __user *addr)
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
- r = get_user_pages_fast(log, 1, 1, &page);
+ r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
if (r < 0)
return r;
BUG_ON(r != 1);
base = kmap_atomic(page);
set_bit(bit, base);
kunmap_atomic(base);
- set_page_dirty_lock(page);
- put_page(page);
+ unpin_user_pages_dirty_lock(&page, 1, true);
return 0;
}
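The bit arithmetic above is easy to misread: the caller hands set_bit_to_user() a byte address into the dirty bitmap plus a bit number below 8, and the function re-derives the page to pin and the bit offset within it. A userspace sketch of the same arithmetic, assuming a 4 KiB PAGE_SIZE and a hypothetical bitmap address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	uint64_t log_base = 0x7f0000000000ULL;	/* hypothetical bitmap HVA */
	uint64_t gpn = 0x12345;			/* guest page number to mark dirty */

	uint64_t log = log_base + gpn / 8;	/* byte holding the bit */
	int nr = gpn % 8;			/* bit within that byte */

	/* set_bit_to_user() pins the page containing 'log' and sets
	 * bit (nr + (log % PAGE_SIZE) * 8) relative to the page start. */
	uint64_t page = log & ~(PAGE_SIZE - 1);
	int bit = nr + (log % PAGE_SIZE) * 8;

	printf("pin page %#llx, set bit %d\n", (unsigned long long)page, bit);
	return 0;
}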
@@ -967,27 +2459,112 @@ static int log_write(void __user *log_base,
return r;
}
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+ struct vhost_iotlb *umem = vq->umem;
+ struct vhost_iotlb_map *u;
+ u64 start, end, l, min;
+ int r;
+ bool hit = false;
+
+ while (len) {
+ min = len;
+		 /* More than one GPA can be mapped into a single HVA, so
+		  * iterate over all possible mappings here to be safe.
+ */
+ list_for_each_entry(u, &umem->list, link) {
+ if (u->addr > hva - 1 + len ||
+ u->addr - 1 + u->size < hva)
+ continue;
+ start = max(u->addr, hva);
+ end = min(u->addr - 1 + u->size, hva - 1 + len);
+ l = end - start + 1;
+ r = log_write(vq->log_base,
+ u->start + start - u->addr,
+ l);
+ if (r < 0)
+ return r;
+ hit = true;
+ min = min(l, min);
+ }
+
+ if (!hit)
+ return -EFAULT;
+
+ len -= min;
+ hva += min;
+ }
+
+ return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+ struct iovec *iov = vq->log_iov;
+ int i, ret;
+
+ if (!vq->iotlb)
+ return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+ len, iov, 64, VHOST_ACCESS_WO);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * vhost_log_write() - Log used buffers in the dirty page bitmap
+ * @vq: vhost virtqueue.
+ * @log: Array of dirty memory ranges in GPA.
+ * @log_num: Size of the vhost_log array.
+ * @len: The total length of the memory buffer to log in the dirty bitmap.
+ *       Some drivers may only partially use pages shared via the last
+ *       vring descriptor (e.g. the vhost-net RX buffer).
+ *       Pass (len == U64_MAX) to log all pages touched by the vring
+ *       descriptors.
+ * @iov: Array of dirty memory ranges in HVA.
+ * @count: Size of the iovec array.
+ */
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len)
+ unsigned int log_num, u64 len, struct iovec *iov, int count)
{
int i, r;
/* Make sure data written is seen before log. */
smp_wmb();
+
+ if (vq->iotlb) {
+ for (i = 0; i < count; i++) {
+ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+ }
+
for (i = 0; i < log_num; ++i) {
u64 l = min(log[i].len, len);
r = log_write(vq->log_base, log[i].addr, l);
if (r < 0)
return r;
- len -= l;
- if (!len) {
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
- return 0;
- }
+
+ if (len != U64_MAX)
+ len -= l;
}
- /* Length written exceeds what we have stored. This is a bug. */
- BUG();
+
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx);
+
return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
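When an IOTLB is in use, dirty tracking becomes a two-step affair: the used ring is first resolved into HVAs, then log_write_hva() maps each HVA run back to every GPA alias before touching the bitmap. The "advance by the smallest overlap" loop is the subtle part; a compilable userspace model of it (types and names illustrative):

#include <stdint.h>
#include <stdio.h>

struct map { uint64_t hva, gpa, size; };

static void log_gpa(uint64_t gpa, uint64_t len)
{
	printf("log GPA [%#llx, %#llx)\n", (unsigned long long)gpa,
	       (unsigned long long)(gpa + len));
}

static int log_hva(const struct map *m, int n, uint64_t hva, uint64_t len)
{
	while (len) {
		uint64_t min_l = len;
		int hit = 0, i;

		/* Several GPAs may alias one HVA: log every overlap,
		 * then advance by the smallest piece seen. */
		for (i = 0; i < n; i++) {
			uint64_t start, end, l;

			if (m[i].hva > hva + len - 1 ||
			    m[i].hva + m[i].size - 1 < hva)
				continue;
			start = m[i].hva > hva ? m[i].hva : hva;
			end = m[i].hva + m[i].size - 1 < hva + len - 1 ?
			      m[i].hva + m[i].size - 1 : hva + len - 1;
			l = end - start + 1;
			log_gpa(m[i].gpa + start - m[i].hva, l);
			hit = 1;
			if (l < min_l)
				min_l = l;
		}
		if (!hit)
			return -1;	/* hole: -EFAULT in the driver */
		len -= min_l;
		hva += min_l;
	}
	return 0;
}

int main(void)
{
	struct map m[] = { { 0x1000, 0x100000, 0x1000 },
			   { 0x1800, 0x200000, 0x1000 } };

	return log_hva(m, 2, 0x1400, 0x800);	/* spans both mappings */
}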
@@ -995,25 +2572,24 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
- if (__put_user(vq->used_flags, &vq->used->flags) < 0)
+ if (vhost_put_used_flags(vq))
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
smp_wmb();
/* Log used flag write. */
used = &vq->used->flags;
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof vq->used->flags);
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof vq->used->flags);
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return 0;
}
-static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{
- if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
+ if (vhost_put_avail_event(vq))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
@@ -1021,88 +2597,111 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
smp_wmb();
/* Log avail event write */
used = vhost_avail_event(vq);
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof *vhost_avail_event(vq));
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return 0;
}
-int vhost_init_used(struct vhost_virtqueue *vq)
+int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
+ __virtio16 last_used_idx;
int r;
+ bool is_le = vq->is_le;
+
if (!vq->private_data)
return 0;
+ vhost_init_is_le(vq);
+
r = vhost_update_used_flags(vq);
if (r)
- return r;
+ goto err;
vq->signalled_used_valid = false;
- return get_user(vq->last_used_idx, &vq->used->idx);
+ if (!vq->iotlb &&
+ !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
+ r = -EFAULT;
+ goto err;
+ }
+ r = vhost_get_used_idx(vq, &last_used_idx);
+ if (r) {
+ vq_err(vq, "Can't access used idx at %p\n",
+ &vq->used->idx);
+ goto err;
+ }
+ vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
+ return 0;
+
+err:
+ vq->is_le = is_le;
+ return r;
}
-EXPORT_SYMBOL_GPL(vhost_init_used);
+EXPORT_SYMBOL_GPL(vhost_vq_init_access);
-static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
- struct iovec iov[], int iov_size)
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct iovec iov[], int iov_size, int access)
{
- const struct vhost_memory_region *reg;
- struct vhost_memory *mem;
+ const struct vhost_iotlb_map *map;
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
- u64 s = 0;
+ u64 s = 0, last = addr + len - 1;
int ret = 0;
- rcu_read_lock();
-
- mem = rcu_dereference(dev->memory);
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
- reg = find_region(mem, addr, len);
- if (unlikely(!reg)) {
- ret = -EFAULT;
+
+ map = vhost_iotlb_itree_first(umem, addr, last);
+ if (map == NULL || map->start > addr) {
+ if (umem != dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = -EAGAIN;
+ break;
+ } else if (!(map->perm & access)) {
+ ret = -EPERM;
break;
}
+
_iov = iov + ret;
- size = reg->memory_size - addr + reg->guest_phys_addr;
+ size = map->size - addr + map->start;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
- (reg->userspace_addr + addr - reg->guest_phys_addr);
+ (map->addr + addr - map->start);
s += size;
addr += size;
++ret;
}
- rcu_read_unlock();
+ if (ret == -EAGAIN)
+ vhost_iotlb_miss(vq, addr, access);
return ret;
}
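translate_desc() is the workhorse of this file: it cuts a guest buffer at mapping boundaries into host iovec segments, preferring the device IOTLB when one is installed and turning a miss into -EAGAIN plus a vhost_iotlb_miss() notification so userspace can fill the hole. A simplified userspace model of the splitting loop, using a linear search in place of the interval tree:

#include <stdint.h>
#include <stdio.h>

struct map { uint64_t start, size, addr; };	/* GPA start, size, HVA */
struct seg { uint64_t base, len; };		/* stands in for struct iovec */

static int translate(const struct map *m, int n, uint64_t addr, uint64_t len,
		     struct seg *seg, int seg_size)
{
	uint64_t s = 0;
	int ret = 0, i;

	while (len > s) {
		const struct map *hit = NULL;
		uint64_t size;

		if (ret >= seg_size)
			return -1;	/* -ENOBUFS */
		for (i = 0; i < n; i++)
			if (m[i].start <= addr && addr < m[i].start + m[i].size)
				hit = &m[i];
		if (!hit)
			return -2;	/* -EFAULT, or -EAGAIN + IOTLB miss */
		size = hit->size - (addr - hit->start);
		seg[ret].len = len - s < size ? len - s : size;
		seg[ret].base = hit->addr + (addr - hit->start);
		s += size;
		addr += size;
		ret++;
	}
	return ret;			/* number of segments built */
}

int main(void)
{
	struct map m[] = { { 0x0, 0x1000, 0x7f0000000000ULL },
			   { 0x1000, 0x1000, 0x7f0000200000ULL } };
	struct seg seg[4];
	int i, r = translate(m, 2, 0xf00, 0x200, seg, 4);

	for (i = 0; i < r; i++)
		printf("seg %d: hva %#llx len %#llx\n", i,
		       (unsigned long long)seg[i].base,
		       (unsigned long long)seg[i].len);
	return 0;
}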
/* Each buffer in the virtqueues is actually a chain of descriptors. This
* function returns the next descriptor in the chain,
* or -1U if we're at the end. */
-static unsigned next_desc(struct vring_desc *desc)
+static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
unsigned int next;
/* If this descriptor says it doesn't chain, we're done. */
- if (!(desc->flags & VRING_DESC_F_NEXT))
+ if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = desc->next;
- /* Make sure compiler knows to grab that: we don't want it changing! */
- /* We will use the result as an index in an array, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
+ next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
return next;
}
-static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+static int get_indirect(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num,
@@ -1110,29 +2709,28 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
- int ret;
+ u32 len = vhost32_to_cpu(vq, indirect->len);
+ struct iov_iter from;
+ int ret, access;
/* Sanity check */
- if (unlikely(indirect->len % sizeof desc)) {
+ if (unlikely(len % sizeof desc)) {
vq_err(vq, "Invalid length in indirect descriptor: "
"len 0x%llx not multiple of 0x%zx\n",
- (unsigned long long)indirect->len,
+ (unsigned long long)len,
sizeof desc);
return -EINVAL;
}
- ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
- UIO_MAXIOV);
+ ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
+ UIO_MAXIOV, VHOST_ACCESS_RO);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
-
- /* We will use the result as an address to read from, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
- count = indirect->len / sizeof desc;
+ iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len);
+ count = len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
@@ -1149,31 +2747,37 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
i, count);
return -EINVAL;
}
- if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
- vq->indirect, sizeof desc))) {
+ if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
- i, (size_t)indirect->addr + i * sizeof desc);
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
- if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
- i, (size_t)indirect->addr + i * sizeof desc);
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
- ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
- iov_size - iov_count);
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d indirect idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d indirect idx %d\n",
+ ret, i);
return ret;
}
/* If this is an input descriptor, increment that count. */
- if (desc.flags & VRING_DESC_F_WRITE) {
+ if (access == VHOST_ACCESS_WO) {
*in_num += ret;
- if (unlikely(log)) {
- log[*log_num].addr = desc.addr;
- log[*log_num].len = desc.len;
+ if (unlikely(log && ret)) {
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
@@ -1186,57 +2790,68 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
}
*out_num += ret;
}
- } while ((i = next_desc(&desc)) != -1);
+ } while ((i = next_desc(vq, &desc)) != -1);
return 0;
}
-/* This looks in the virtqueue and for the first available buffer, and converts
- * it to an iovec for convenient access. Since descriptors consist of some
- * number of output then some number of input descriptors, it's actually two
- * iovecs, but we pack them into one and note how many of each there were.
+/**
+ * vhost_get_vq_desc_n - Fetch the next available descriptor chain and build iovecs
+ * @vq: target virtqueue
+ * @iov: array that receives the scatter/gather segments
+ * @iov_size: capacity of @iov in elements
+ * @out_num: the number of output segments
+ * @in_num: the number of input segments
+ * @log: optional array to record addr/len for each writable segment; NULL if unused
+ * @log_num: optional output; number of entries written to @log when provided
+ * @ndesc: optional output; number of descriptors consumed from the available ring
+ * (useful for rollback via vhost_discard_vq_desc)
*
- * This function returns the descriptor number found, or vq->num (which is
- * never a valid descriptor number) if none was found. A negative code is
- * returned on error. */
-int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num)
+ * Extracts one available descriptor chain from @vq and translates guest addresses
+ * into host iovecs.
+ *
+ * On success, advances @vq->last_avail_idx by 1 and @vq->next_avail_head by the
+ * number of descriptors consumed (also stored via @ndesc when non-NULL).
+ *
+ * Return:
+ * - head index in [0, @vq->num) on success;
+ * - @vq->num if no descriptor is currently available;
+ * - negative errno on failure
+ */
+int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ unsigned int *ndesc)
{
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
struct vring_desc desc;
unsigned int i, head, found = 0;
- u16 last_avail_idx;
- int ret;
+ u16 last_avail_idx = vq->last_avail_idx;
+ __virtio16 ring_head;
+ int ret, access, c = 0;
- /* Check it isn't doing very strange things with descriptor numbers. */
- last_avail_idx = vq->last_avail_idx;
- if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
- vq_err(vq, "Failed to access avail idx at %p\n",
- &vq->avail->idx);
- return -EFAULT;
- }
+ if (vq->avail_idx == vq->last_avail_idx) {
+ ret = vhost_get_avail_idx(vq);
+ if (unlikely(ret < 0))
+ return ret;
- if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
- vq_err(vq, "Guest moved used index from %u to %u",
- last_avail_idx, vq->avail_idx);
- return -EFAULT;
+ if (!ret)
+ return vq->num;
}
- /* If there's nothing new since last we looked, return invalid. */
- if (vq->avail_idx == last_avail_idx)
- return vq->num;
-
- /* Only get avail ring entries after they have been exposed by guest. */
- smp_rmb();
-
- /* Grab the next descriptor number they're advertising, and increment
- * the index we've seen. */
- if (unlikely(__get_user(head,
- &vq->avail->ring[last_avail_idx % vq->num]))) {
- vq_err(vq, "Failed to read head: idx %d address %p\n",
- last_avail_idx,
- &vq->avail->ring[last_avail_idx % vq->num]);
- return -EFAULT;
+ if (in_order)
+ head = vq->next_avail_head & (vq->num - 1);
+ else {
+ /* Grab the next descriptor number they're
+ * advertising, and increment the index we've seen. */
+ if (unlikely(vhost_get_avail_head(vq, &ring_head,
+ last_avail_idx))) {
+ vq_err(vq, "Failed to read head: idx %d address %p\n",
+ last_avail_idx,
+ &vq->avail->ring[last_avail_idx % vq->num]);
+ return -EFAULT;
+ }
+ head = vhost16_to_cpu(vq, ring_head);
}
/* If their number is silly, that's an error. */
@@ -1265,38 +2880,46 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
i, vq->num, head);
return -EINVAL;
}
- ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
+ ret = vhost_get_desc(vq, &desc, i);
if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
return -EFAULT;
}
- if (desc.flags & VRING_DESC_F_INDIRECT) {
- ret = get_indirect(dev, vq, iov, iov_size,
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
+ ret = get_indirect(vq, iov, iov_size,
out_num, in_num,
log, log_num, &desc);
if (unlikely(ret < 0)) {
- vq_err(vq, "Failure detected "
- "in indirect descriptor at idx %d\n", i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", i);
return ret;
}
+ ++c;
continue;
}
- ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
- iov_size - iov_count);
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d descriptor idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
return ret;
}
- if (desc.flags & VRING_DESC_F_WRITE) {
+ if (access == VHOST_ACCESS_WO) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
- if (unlikely(log)) {
- log[*log_num].addr = desc.addr;
- log[*log_num].len = desc.len;
+ if (unlikely(log && ret)) {
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
@@ -1309,22 +2932,56 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
}
*out_num += ret;
}
- } while ((i = next_desc(&desc)) != -1);
+ ++c;
+ } while ((i = next_desc(vq, &desc)) != -1);
/* On success, increment avail index. */
vq->last_avail_idx++;
+ vq->next_avail_head += c;
+
+ if (ndesc)
+ *ndesc = c;
/* Assume notifications from guest are disabled at this point,
* if they aren't we would need to update avail_event index. */
BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc_n);
+
+/* This looks in the virtqueue for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number found, or vq->num (which is
+ * never a valid descriptor number) if none was found. A negative code is
+ * returned on error.
+ */
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ return vhost_get_vq_desc_n(vq, iov, iov_size, out_num, in_num,
+ log, log_num, NULL);
+}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
-/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
-void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
+/**
+ * vhost_discard_vq_desc - Reverse the effect of vhost_get_vq_desc_n()
+ * @vq: target virtqueue
+ * @nbufs: number of buffers to roll back
+ * @ndesc: number of descriptors to roll back
+ *
+ * Rewinds the internal consumer cursors after a failed attempt to use buffers
+ * returned by vhost_get_vq_desc_n().
+ */
+void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int nbufs,
+ unsigned int ndesc)
{
- vq->last_avail_idx -= n;
+ vq->next_avail_head -= ndesc;
+ vq->last_avail_idx -= nbufs;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
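With VIRTIO_F_IN_ORDER in the picture there are now two cursors to unwind: last_avail_idx counts buffers (chain heads) while next_avail_head counts raw descriptors, which is why vhost_get_vq_desc_n() reports ndesc back to its caller. A toy model of the bookkeeping:

#include <stdio.h>

struct vq { unsigned short last_avail_idx, next_avail_head; };

/* Model of vhost_get_vq_desc_n() cursor updates: one buffer may span
 * several descriptors, so the two cursors advance by different amounts. */
static void consume(struct vq *vq, unsigned ndesc)
{
	vq->last_avail_idx += 1;	/* one buffer (chain head) */
	vq->next_avail_head += ndesc;	/* all descriptors in the chain */
}

static void discard(struct vq *vq, int nbufs, unsigned ndesc)
{
	vq->next_avail_head -= ndesc;
	vq->last_avail_idx -= nbufs;
}

int main(void)
{
	struct vq vq = { 0, 0 };

	consume(&vq, 3);	/* a 3-descriptor chain */
	discard(&vq, 1, 3);	/* backend failed: rewind both cursors */
	printf("%u %u\n", vq.last_avail_idx, vq.next_avail_head); /* 0 0 */
	return 0;
}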
@@ -1332,48 +2989,13 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem __user *used;
+ struct vring_used_elem heads = {
+ cpu_to_vhost32(vq, head),
+ cpu_to_vhost32(vq, len)
+ };
+ u16 nheads = 1;
- /* The virtqueue contains a ring of used buffers. Get a pointer to the
- * next entry in that used ring. */
- used = &vq->used->ring[vq->last_used_idx % vq->num];
- if (__put_user(head, &used->id)) {
- vq_err(vq, "Failed to write used id");
- return -EFAULT;
- }
- if (__put_user(len, &used->len)) {
- vq_err(vq, "Failed to write used len");
- return -EFAULT;
- }
- /* Make sure buffer is written before we update index. */
- smp_wmb();
- if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
- vq_err(vq, "Failed to increment used idx");
- return -EFAULT;
- }
- if (unlikely(vq->log_used)) {
- /* Make sure data is seen before log. */
- smp_wmb();
- /* Log used ring entry write. */
- log_write(vq->log_base,
- vq->log_addr +
- ((void __user *)used - (void __user *)vq->used),
- sizeof *used);
- /* Log used index update. */
- log_write(vq->log_base,
- vq->log_addr + offsetof(struct vring_used, idx),
- sizeof vq->used->idx);
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
- }
- vq->last_used_idx++;
- /* If the driver never bothers to signal in a very long while,
- * used index might wrap around. If that happens, invalidate
- * signalled_used index we stored. TODO: make sure driver
- * signals at least once in 2^16 and remove this. */
- if (unlikely(vq->last_used_idx == vq->signalled_used))
- vq->signalled_used_valid = false;
- return 0;
+ return vhost_add_used_n(vq, &heads, &nheads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
@@ -1381,13 +3003,13 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
struct vring_used_elem *heads,
unsigned count)
{
- struct vring_used_elem __user *used;
+ vring_used_elem_t __user *used;
u16 old, new;
int start;
- start = vq->last_used_idx % vq->num;
+ start = vq->last_used_idx & (vq->num - 1);
used = vq->used->ring + start;
- if (__copy_to_user(used, heads, count * sizeof *used)) {
+ if (vhost_put_used(vq, heads, start, count)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
@@ -1395,10 +3017,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
/* Make sure data is seen before log. */
smp_wmb();
/* Log used ring entry write. */
- log_write(vq->log_base,
- vq->log_addr +
- ((void __user *)used - (void __user *)vq->used),
- count * sizeof *used);
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
+ count * sizeof *used);
}
old = vq->last_used_idx;
new = (vq->last_used_idx += count);
@@ -1411,14 +3031,13 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
return 0;
}
-/* After we've used one of their buffers, we tell them about it. We'll then
- * want to notify the guest, using eventfd. */
-int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
- unsigned count)
+static int vhost_add_used_n_ooo(struct vhost_virtqueue *vq,
+ struct vring_used_elem *heads,
+ unsigned count)
{
int start, n, r;
- start = vq->last_used_idx % vq->num;
+ start = vq->last_used_idx & (vq->num - 1);
n = vq->num - start;
if (n < count) {
r = __vhost_add_used_n(vq, heads, n);
@@ -1427,21 +3046,87 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
heads += n;
count -= n;
}
- r = __vhost_add_used_n(vq, heads, count);
+ return __vhost_add_used_n(vq, heads, count);
+}
+
+static int vhost_add_used_n_in_order(struct vhost_virtqueue *vq,
+ struct vring_used_elem *heads,
+ const u16 *nheads,
+ unsigned count)
+{
+ vring_used_elem_t __user *used;
+ u16 old, new = vq->last_used_idx;
+ int start, i;
+
+ if (!nheads)
+ return -EINVAL;
+
+ start = vq->last_used_idx & (vq->num - 1);
+ used = vq->used->ring + start;
+
+ for (i = 0; i < count; i++) {
+ if (vhost_put_used(vq, &heads[i], start, 1)) {
+ vq_err(vq, "Failed to write used");
+ return -EFAULT;
+ }
+ start += nheads[i];
+ new += nheads[i];
+ if (start >= vq->num)
+ start -= vq->num;
+ }
+
+ if (unlikely(vq->log_used)) {
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ /* Log used ring entry write. */
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
+ (vq->num - start) * sizeof *used);
+ if (start + count > vq->num)
+ log_used(vq, 0,
+ (start + count - vq->num) * sizeof *used);
+ }
+
+ old = vq->last_used_idx;
+ vq->last_used_idx = new;
+ /* If the driver never bothers to signal in a very long while,
+ * used index might wrap around. If that happens, invalidate
+ * signalled_used index we stored. TODO: make sure driver
+ * signals at least once in 2^16 and remove this. */
+ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+ vq->signalled_used_valid = false;
+ return 0;
+}
+
+/* After we've used one of their buffers, we tell them about it. We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+ u16 *nheads, unsigned count)
+{
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ int r;
+
+ if (!in_order || !nheads)
+ r = vhost_add_used_n_ooo(vq, heads, count);
+ else
+ r = vhost_add_used_n_in_order(vq, heads, nheads, count);
+
+ if (r < 0)
+ return r;
/* Make sure buffer is written before we update index. */
smp_wmb();
- if (put_user(vq->last_used_idx, &vq->used->idx)) {
+ if (vhost_put_used_idx(vq)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
+ /* Make sure used idx is seen before log. */
+ smp_wmb();
/* Log used index update. */
- log_write(vq->log_base,
- vq->log_addr + offsetof(struct vring_used, idx),
- sizeof vq->used->idx);
+ log_used(vq, offsetof(struct vring_used, idx),
+ sizeof vq->used->idx);
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return r;
}
@@ -1449,24 +3134,25 @@ EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __u16 old, new, event;
+ __u16 old, new;
+ __virtio16 event;
bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
- if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
return true;
- if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
- __u16 flags;
- if (__get_user(flags, &vq->avail->flags)) {
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
+ __virtio16 flags;
+ if (vhost_get_avail_flags(vq, &flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
- return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
+ return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
}
old = vq->signalled_used;
v = vq->signalled_used_valid;
@@ -1476,19 +3162,19 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- if (get_user(event, vhost_used_event(vq))) {
+ if (vhost_get_used_event(vq, &event)) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- return vring_need_event(event, new, old);
+ return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
/* Signal the Guest tell them we used something up. */
- if (vq->call_ctx && vhost_notify(dev, vq))
- eventfd_signal(vq->call_ctx, 1);
+ if (vq->call_ctx.ctx && vhost_notify(dev, vq))
+ eventfd_signal(vq->call_ctx.ctx);
}
EXPORT_SYMBOL_GPL(vhost_signal);
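When VIRTIO_RING_F_EVENT_IDX is negotiated, vhost_notify() suppresses interrupts unless the guest's used_event index lies in the window of entries consumed since the last signal. vring_need_event() is the existing macro from include/uapi/linux/virtio_ring.h; a userspace check of its mod-2^16 window test:

#include <stdint.h>
#include <stdio.h>

static int vring_need_event(uint16_t event, uint16_t new_idx, uint16_t old)
{
	/* All arithmetic is mod 2^16, so index wraparound works out. */
	return (uint16_t)(new_idx - event - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* Guest asked to be signalled once entry 5 has been used. */
	printf("%d\n", vring_need_event(5, 6, 3)); /* 1: 5 is in (3, 6] */
	printf("%d\n", vring_need_event(9, 6, 3)); /* 0: not reached yet */
	return 0;
}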
@@ -1505,23 +3191,39 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
- struct vring_used_elem *heads, unsigned count)
+ struct vring_used_elem *heads,
+ u16 *nheads,
+ unsigned count)
{
- vhost_add_used_n(vq, heads, count);
+ vhost_add_used_n(vq, heads, nheads, count);
vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
+/* Return true only if we're sure that the available ring is empty */
+bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ int r;
+
+ if (vq->avail_idx != vq->last_avail_idx)
+ return false;
+
+ r = vhost_get_avail_idx(vq);
+
+ /* Note: we treat error as non-empty here */
+ return r == 0;
+}
+EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
+
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- u16 avail_idx;
int r;
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
- if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r) {
vq_err(vq, "Failed to enable notification at %p: %d\n",
@@ -1529,7 +3231,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return false;
}
} else {
- r = vhost_update_avail_event(vq, vq->avail_idx);
+ r = vhost_update_avail_event(vq);
if (r) {
vq_err(vq, "Failed to update avail event index at %p: %d\n",
vhost_avail_event(vq), r);
@@ -1539,14 +3241,13 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
- r = __get_user(avail_idx, &vq->avail->idx);
- if (r) {
- vq_err(vq, "Failed to check avail idx at %p: %d\n",
- &vq->avail->idx, r);
+
+ r = vhost_get_avail_idx(vq);
+ /* Note: we treat error as empty here */
+ if (unlikely(r < 0))
return false;
- }
- return avail_idx != vq->avail_idx;
+ return r;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
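vhost_disable_notify()/vhost_enable_notify() bracket the canonical worker poll loop: kicks are masked while the ring is drained, and the smp_mb() plus re-read of avail_idx on re-enable closes the race with a guest that published a buffer in between. The shape of that loop as a compilable toy; the vhost calls appear only as comments and ring_empty()/late_kick_once() are stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int pending = 3;

static bool ring_empty(void)  { return pending == 0; }
static void process_one(void) { pending--; printf("handled a buffer\n"); }

static bool late_kick_once(void)
{
	static bool done;

	if (done)
		return false;
	done = true;
	pending = 1;	/* guest added a buffer during re-enable */
	return true;
}

int main(void)
{
	for (;;) {
		/* vhost_disable_notify(dev, vq): mask guest kicks */
		while (!ring_empty())
			process_one();
		/* vhost_enable_notify(dev, vq): unmask, then re-check */
		if (late_kick_once())
			continue;	/* a buffer slipped in: poll again */
		break;			/* truly empty: sleep until kicked */
	}
	return 0;
}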
@@ -1558,15 +3259,73 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
- if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r)
- vq_err(vq, "Failed to enable notification at %p: %d\n",
+ vq_err(vq, "Failed to disable notification at %p: %d\n",
&vq->used->flags, r);
}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
+/* Create a new message. */
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+{
+ /* Make sure all padding within the structure is initialized. */
+ struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_new_msg);
+
+void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
+ struct vhost_msg_node *node)
+{
+ spin_lock(&dev->iotlb_lock);
+ list_add_tail(&node->node, head);
+ spin_unlock(&dev->iotlb_lock);
+
+ wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
+}
+EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
+
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head)
+{
+ struct vhost_msg_node *node = NULL;
+
+ spin_lock(&dev->iotlb_lock);
+ if (!list_empty(head)) {
+ node = list_first_entry(head, struct vhost_msg_node,
+ node);
+ list_del(&node->node);
+ }
+ spin_unlock(&dev->iotlb_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ for (i = 0; i < dev->nvqs; ++i) {
+ vq = dev->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_backend_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(vhost_set_backend_features);
+
static int __init vhost_init(void)
{
return 0;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42298cd23c73..4fe99765c5c7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H
@@ -11,65 +12,98 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
-
-struct vhost_device;
+#include <linux/vhost_iotlb.h>
+#include <linux/irqbypass.h>
+#include <linux/unroll.h>
struct vhost_work;
+struct vhost_task;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+#define VHOST_WORK_QUEUED 1
struct vhost_work {
- struct list_head node;
- vhost_work_fn_t fn;
- wait_queue_head_t done;
- int flushing;
- unsigned queue_seq;
- unsigned done_seq;
+ struct llist_node node;
+ vhost_work_fn_t fn;
+ unsigned long flags;
+};
+
+struct vhost_worker;
+struct vhost_dev;
+
+struct vhost_worker_ops {
+ int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
+ const char *name);
+ void (*stop)(struct vhost_worker *worker);
+ void (*wakeup)(struct vhost_worker *worker);
+};
+
+struct vhost_worker {
+ struct task_struct *kthread_task;
+ struct vhost_task *vtsk;
+ struct vhost_dev *dev;
+ /* Used to serialize device wide flushing with worker swapping. */
+ struct mutex mutex;
+ struct llist_head work_list;
+ u64 kcov_handle;
+ u32 id;
+ int attachment_cnt;
+ bool killed;
+ const struct vhost_worker_ops *ops;
};
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
- poll_table table;
- wait_queue_head_t *wqh;
- wait_queue_t wait;
- struct vhost_work work;
- unsigned long mask;
- struct vhost_dev *dev;
+ poll_table table;
+ wait_queue_head_t *wqh;
+ wait_queue_entry_t wait;
+ struct vhost_work work;
+ __poll_t mask;
+ struct vhost_dev *dev;
+ struct vhost_virtqueue *vq;
};
-void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
-void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
-
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev);
+ __poll_t mask, struct vhost_dev *dev,
+ struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
-void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
-void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
-long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
+
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
+void vhost_dev_flush(struct vhost_dev *dev);
struct vhost_log {
u64 addr;
u64 len;
};
-struct vhost_virtqueue;
+enum vhost_uaddr_type {
+ VHOST_ADDR_DESC = 0,
+ VHOST_ADDR_AVAIL = 1,
+ VHOST_ADDR_USED = 2,
+ VHOST_NUM_ADDRS = 3,
+};
+
+struct vhost_vring_call {
+ struct eventfd_ctx *ctx;
+ struct irq_bypass_producer producer;
+};
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
+ struct vhost_worker __rcu *worker;
/* The actual ring of buffers. */
struct mutex mutex;
unsigned int num;
- struct vring_desc __user *desc;
- struct vring_avail __user *avail;
- struct vring_used __user *used;
+ vring_desc_t __user *desc;
+ vring_avail_t __user *avail;
+ vring_used_t __user *used;
+ const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
- struct file *call;
- struct file *error;
- struct eventfd_ctx *call_ctx;
+ struct vhost_vring_call call_ctx;
struct eventfd_ctx *error_ctx;
struct eventfd_ctx *log_ctx;
@@ -78,13 +112,19 @@ struct vhost_virtqueue {
/* The routine to call when the Guest pings us, or timeout. */
vhost_work_fn_t handle_kick;
- /* Last available index we saw. */
+ /* Last available index we saw.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_avail_idx;
+	/* Next avail ring head when VIRTIO_F_IN_ORDER is negotiated */
+ u16 next_avail_head;
/* Caches available index value from user. */
u16 avail_idx;
- /* Last index we used. */
+ /* Last index we used.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_used_idx;
/* Used flags */
@@ -101,92 +141,264 @@ struct vhost_virtqueue {
u64 log_addr;
struct iovec iov[UIO_MAXIOV];
+ struct iovec iotlb_iov[64];
struct iovec *indirect;
struct vring_used_elem *heads;
- /* We use a kind of RCU to access private pointer.
- * All readers access it from worker, which makes it possible to
- * flush the vhost_work instead of synchronize_rcu. Therefore readers do
- * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
- * vhost_work execution acts instead of rcu_read_lock() and the end of
- * vhost_work execution acts instead of rcu_read_unlock().
- * Writers use virtqueue mutex. */
- void __rcu *private_data;
+ u16 *nheads;
+ /* Protected by virtqueue mutex. */
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
+ void *private_data;
+ VIRTIO_DECLARE_FEATURES(acked_features);
+ u64 acked_backend_features;
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
+ struct iovec log_iov[64];
+
+ /* Ring endianness. Defaults to legacy native endianness.
+ * Set to true when starting a modern virtio device. */
+ bool is_le;
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+ /* Ring endianness requested by userspace for cross-endian support. */
+ bool user_be;
+#endif
+ u32 busyloop_timeout;
+};
+
+struct vhost_msg_node {
+ union {
+ struct vhost_msg msg;
+ struct vhost_msg_v2 msg_v2;
+ };
+ struct vhost_virtqueue *vq;
+ struct list_head node;
};
struct vhost_dev {
- /* Readers use RCU to access memory table pointer
- * log base pointer and features.
- * Writers use mutex below.*/
- struct vhost_memory __rcu *memory;
struct mm_struct *mm;
struct mutex mutex;
- unsigned acked_features;
struct vhost_virtqueue **vqs;
int nvqs;
- struct file *log_file;
struct eventfd_ctx *log_ctx;
- spinlock_t work_lock;
- struct list_head work_list;
- struct task_struct *worker;
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
+ spinlock_t iotlb_lock;
+ struct list_head read_list;
+ struct list_head pending_list;
+ wait_queue_head_t wait;
+ int iov_limit;
+ int weight;
+ int byte_weight;
+ struct xarray worker_xa;
+ bool use_worker;
+ /*
+ * If fork_owner is true we use vhost_tasks to create
+ * the worker so all settings/limits like cgroups, NPROC,
+ * scheduler, etc are inherited from the owner. If false,
+ * we use kthreads and only attach to the same cgroups
+ * as the owner for compat with older kernels.
+	 * The default is true and is taken from fork_from_owner_default.
+ */
+ bool fork_owner;
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg);
};
-long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+ int nvqs, int iov_limit, int weight, int byte_weight,
+ bool use_worker,
+ int (*msg_handler)(struct vhost_dev *dev, u32 asid,
+ struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void);
-void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
-void vhost_dev_cleanup(struct vhost_dev *, bool locked);
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
+void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
-long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
-int vhost_vq_access_ok(struct vhost_virtqueue *vq);
-int vhost_log_access_ok(struct vhost_dev *);
+long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
+ void __user *argp);
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
+bool vhost_log_access_ok(struct vhost_dev *);
+void vhost_clear_msg(struct vhost_dev *dev);
-int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
- struct iovec iov[], unsigned int iov_count,
+int vhost_get_vq_desc(struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num);
-void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
-int vhost_init_used(struct vhost_virtqueue *);
+int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ unsigned int *ndesc);
+
+void vhost_discard_vq_desc(struct vhost_virtqueue *, int nbuf,
+ unsigned int ndesc);
+
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
+bool vhost_vq_has_work(struct vhost_virtqueue *vq);
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
+int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
- unsigned count);
+ u16 *nheads, unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
- struct vring_used_elem *heads, unsigned count);
+ struct vring_used_elem *heads, u16 *nheads,
+ unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len);
+ unsigned int log_num, u64 len,
+ struct iovec *iov, int count);
+int vq_meta_prefetch(struct vhost_virtqueue *vq);
+
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
+void vhost_enqueue_msg(struct vhost_dev *dev,
+ struct list_head *head,
+ struct vhost_msg_node *node);
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
+
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait);
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock);
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from);
+int vhost_init_device_iotlb(struct vhost_dev *d);
+
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map);
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
if ((vq)->error_ctx) \
- eventfd_signal((vq)->error_ctx, 1);\
+ eventfd_signal((vq)->error_ctx);\
} while (0)
-enum {
- VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
- (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
- (1ULL << VIRTIO_RING_F_EVENT_IDX) |
- (1ULL << VHOST_F_LOG_ALL),
-};
+#define VHOST_FEATURES \
+ VIRTIO_F_NOTIFY_ON_EMPTY, \
+ VIRTIO_RING_F_INDIRECT_DESC, \
+ VIRTIO_RING_F_EVENT_IDX, \
+ VHOST_F_LOG_ALL, \
+ VIRTIO_F_ANY_LAYOUT, \
+ VIRTIO_F_VERSION_1
+
+static inline u64 vhost_features_u64(const int *features, int size, int idx)
+{
+ u64 res = 0;
+
+ unrolled_count(VIRTIO_FEATURES_BITS)
+ for (int i = 0; i < size; ++i) {
+ int bit = features[i];
+
+ if (virtio_features_chk_bit(bit) && VIRTIO_U64(bit) == idx)
+ res |= VIRTIO_BIT(bit);
+ }
+ return res;
+}
-static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
+#define VHOST_FEATURES_U64(features, idx) \
+ vhost_features_u64(features, ARRAY_SIZE(features), idx)
+
+#define DEFINE_VHOST_FEATURES_ARRAY_ENTRY(idx, features) \
+ [idx] = VHOST_FEATURES_U64(features, idx),
+
+#define DEFINE_VHOST_FEATURES_ARRAY(array, features) \
+ u64 array[VIRTIO_FEATURES_U64S] = { \
+ UNROLL(VIRTIO_FEATURES_U64S, \
+ DEFINE_VHOST_FEATURES_ARRAY_ENTRY, features) \
+ }
+
+/**
+ * vhost_vq_set_backend - Set backend.
+ *
+ * @vq Virtqueue.
+ * @private_data The private data.
+ *
+ * Context: Need to call with vq->mutex acquired.
+ */
+static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
+ void *private_data)
{
- unsigned acked_features;
+ vq->private_data = private_data;
+}
- /* TODO: check that we are running from vhost_worker or dev mutex is
- * held? */
- acked_features = rcu_dereference_index_check(dev->acked_features, 1);
- return acked_features & (1 << bit);
+/**
+ * vhost_vq_get_backend - Get backend.
+ *
+ * @vq Virtqueue.
+ *
+ * Context: Need to call with vq->mutex acquired.
+ * Return: Private data previously set with vhost_vq_set_backend.
+ */
+static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
+{
+ return vq->private_data;
+}
+
+static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return virtio_features_test_bit(vq->acked_features_array, bit);
+}
+
+static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return vq->acked_backend_features & (1ULL << bit);
+}
+
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+ return vq->is_le;
+}
+#else
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+ return virtio_legacy_is_little_endian() || vq->is_le;
+}
+#endif
+
+/* Memory accessors */
+static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
+{
+ return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
+}
+
+static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
+{
+ return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
+}
+
+static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
+{
+ return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
+}
+
+static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
+{
+ return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
+}
+
+static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
+{
+ return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
+}
+
+static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
+{
+ return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif
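The memory accessors above funnel every ring field through a single per-vq is_le decision: legacy devices use the guest's native byte order, while VIRTIO_F_VERSION_1 fixes the ring as little-endian. A standalone illustration of the 16-bit case (host endianness passed in explicitly for the sake of the example):

#include <stdint.h>
#include <stdio.h>

static uint16_t bswap16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

static uint16_t vhost16_to_cpu(int ring_is_le, int host_is_le, uint16_t v)
{
	/* A no-op when ring endianness matches the host's. */
	return ring_is_le == host_is_le ? v : bswap16(v);
}

int main(void)
{
	printf("%#x\n", vhost16_to_cpu(1, 0, 0x0102)); /* 0x201 on a BE host */
	return 0;
}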
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 5174ebac288d..925858cc6096 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1,8 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Helpers for the host side of a virtio ring.
*
* Since these may be in userspace, we use (inline) accessors.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
@@ -11,6 +13,12 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/vhost_iotlb.h>
+#endif
+#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
@@ -28,13 +36,14 @@ static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
- int (*getu16)(u16 *val, const u16 *p),
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p),
u16 *last_avail_idx)
{
u16 avail_idx, i, head;
int err;
- err = getu16(&avail_idx, &vrh->vring.avail->idx);
+ err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
if (err) {
vringh_bad("Failed to access avail idx at %p",
&vrh->vring.avail->idx);
@@ -49,7 +58,7 @@ static inline int __vringh_get_head(const struct vringh *vrh,
i = *last_avail_idx & (vrh->vring.num - 1);
- err = getu16(&head, &vrh->vring.avail->ring[i]);
+ err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
if (err) {
vringh_bad("Failed to read head: idx %d address %p",
*last_avail_idx, &vrh->vring.avail->ring[i]);
@@ -66,10 +75,40 @@ static inline int __vringh_get_head(const struct vringh *vrh,
return head;
}
+/**
+ * vringh_kiov_advance - skip bytes from vring_kiov
+ * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
+ * @len: the maximum length to advance
+ */
+void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
+{
+ while (len && iov->i < iov->used) {
+ size_t partlen = min(iov->iov[iov->i].iov_len, len);
+
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
+
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+
+ len -= partlen;
+ }
+}
+EXPORT_SYMBOL(vringh_kiov_advance);
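vringh_kiov_advance() consumes bytes in place but restores each drained element's original base/len before stepping past it, so the kiov can later be rewound or reused. A compilable userspace model of that fix-up (toy types in place of struct vringh_kiov):

#include <stdio.h>
#include <stddef.h>

struct kiov_elem { char *base; size_t len; };
struct kiov { struct kiov_elem iov[4]; unsigned i, used; size_t consumed; };

static void kiov_advance(struct kiov *iov, size_t len)
{
	while (len && iov->i < iov->used) {
		size_t partlen = iov->iov[iov->i].len < len ?
				 iov->iov[iov->i].len : len;

		iov->consumed += partlen;
		iov->iov[iov->i].len -= partlen;
		iov->iov[iov->i].base += partlen;

		if (!iov->iov[iov->i].len) {
			/* Fix up the drained element, then move on. */
			iov->iov[iov->i].len = iov->consumed;
			iov->iov[iov->i].base -= iov->consumed;
			iov->consumed = 0;
			iov->i++;
		}
		len -= partlen;
	}
}

int main(void)
{
	char a[8], b[8];
	struct kiov k = { { { a, sizeof(a) }, { b, sizeof(b) } }, 0, 2, 0 };

	kiov_advance(&k, 10); /* drains a[], leaves 6 bytes pending in b[] */
	printf("i=%u remaining=%zu\n", k.i, k.iov[1].len); /* i=1 remaining=6 */
	return 0;
}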
+
/* Copy some bytes to/from the iovec. Returns num copied. */
-static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
+static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+ struct vringh_kiov *iov,
void *ptr, size_t len,
- int (*xfer)(void *addr, void *ptr,
+ int (*xfer)(const struct vringh *vrh,
+ void *addr, void *ptr,
size_t len))
{
int err, done = 0;
@@ -78,7 +117,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
size_t partlen;
partlen = min(iov->iov[iov->i].iov_len, len);
- err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
+ err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
if (err)
return err;
done += partlen;
@@ -92,7 +131,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
/* Fix up old iov element then increment. */
iov->iov[iov->i].iov_len = iov->consumed;
iov->iov[iov->i].iov_base -= iov->consumed;
-
+
iov->consumed = 0;
iov->i++;
}
@@ -144,28 +183,32 @@ static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
}
/* No reason for this code to be inline. */
-static int move_to_indirect(int *up_next, u16 *i, void *addr,
+static int move_to_indirect(const struct vringh *vrh,
+ int *up_next, u16 *i, void *addr,
const struct vring_desc *desc,
struct vring_desc **descs, int *desc_max)
{
+ u32 len;
+
/* Indirect tables can't have indirect. */
if (*up_next != -1) {
vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
return -EINVAL;
}
- if (unlikely(desc->len % sizeof(struct vring_desc))) {
+ len = vringh32_to_cpu(vrh, desc->len);
+ if (unlikely(len % sizeof(struct vring_desc))) {
vringh_bad("Strange indirect len %u", desc->len);
return -EINVAL;
}
/* We will check this when we follow it! */
- if (desc->flags & VRING_DESC_F_NEXT)
- *up_next = desc->next;
+ if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
+ *up_next = vringh16_to_cpu(vrh, desc->next);
else
*up_next = -2;
*descs = addr;
- *desc_max = desc->len / sizeof(struct vring_desc);
+ *desc_max = len / sizeof(struct vring_desc);
/* Now, start at the first indirect. */
*i = 0;
@@ -182,9 +225,9 @@ static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
if (flag)
- new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
+ new = krealloc_array(iov->iov, new_num, sizeof(*new), gfp);
else {
- new = kmalloc(new_num * sizeof(struct iovec), gfp);
+ new = kmalloc_array(new_num, sizeof(*new), gfp);
if (new) {
memcpy(new, iov->iov,
iov->max_num * sizeof(struct iovec));
@@ -219,7 +262,8 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src,
u64 addr,
struct vringh_range *r),
struct vringh_range *range,
- int (*copy)(void *dst, const void *src, size_t len))
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
{
size_t part, len = sizeof(struct vring_desc);
@@ -233,7 +277,7 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src,
if (!rcheck(vrh, addr, &part, range, getrange))
return -EINVAL;
- err = copy(dst, src, part);
+ err = copy(vrh, dst, src, part);
if (err)
return err;
@@ -254,9 +298,10 @@ __vringh_iov(struct vringh *vrh, u16 i,
struct vringh_range *)),
bool (*getrange)(struct vringh *, u64, struct vringh_range *),
gfp_t gfp,
- int (*copy)(void *dst, const void *src, size_t len))
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
{
- int err, count = 0, up_next, desc_max;
+ int err, count = 0, indirect_count = 0, up_next, desc_max;
struct vring_desc desc, *descs;
struct vringh_range range = { -1ULL, 0 }, slowrange;
bool slow = false;
@@ -266,13 +311,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
desc_max = vrh->vring.num;
up_next = -1;
+ /* You must want something! */
+ if (WARN_ON(!riov && !wiov))
+ return -EINVAL;
+
if (riov)
- riov->i = riov->used = 0;
- else if (wiov)
- wiov->i = wiov->used = 0;
- else
- /* You must want something! */
- BUG();
+ riov->i = riov->used = riov->consumed = 0;
+ if (wiov)
+ wiov->i = wiov->used = wiov->consumed = 0;
for (;;) {
void *addr;
@@ -283,43 +329,51 @@ __vringh_iov(struct vringh *vrh, u16 i,
err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
&slowrange, copy);
else
- err = copy(&desc, &descs[i], sizeof(desc));
+ err = copy(vrh, &desc, &descs[i], sizeof(desc));
if (unlikely(err))
goto fail;
- if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(desc.flags &
+ cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
+ u64 a = vringh64_to_cpu(vrh, desc.addr);
+
/* Make sure it's OK, and get offset. */
- len = desc.len;
- if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+ len = vringh32_to_cpu(vrh, desc.len);
+ if (!rcheck(vrh, a, &len, &range, getrange)) {
err = -EINVAL;
goto fail;
}
- if (unlikely(len != desc.len)) {
+ if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
slow = true;
/* We need to save this range to use offset */
slowrange = range;
}
- addr = (void *)(long)(desc.addr + range.offset);
- err = move_to_indirect(&up_next, &i, addr, &desc,
+ addr = (void *)(long)(a + range.offset);
+ err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
&descs, &desc_max);
if (err)
goto fail;
continue;
}
- if (count++ == vrh->vring.num) {
+ if (up_next == -1)
+ count++;
+ else
+ indirect_count++;
+
+ if (count > vrh->vring.num || indirect_count > desc_max) {
vringh_bad("Descriptor loop in %p", descs);
err = -ELOOP;
goto fail;
}
- if (desc.flags & VRING_DESC_F_WRITE)
+ if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
iov = wiov;
else {
iov = riov;
- if (unlikely(wiov && wiov->i)) {
+ if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable",
&descs[i]);
err = -EINVAL;
@@ -336,12 +390,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
again:
/* Make sure it's OK, and get offset. */
- len = desc.len;
- if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+ len = vringh32_to_cpu(vrh, desc.len);
+ if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
+ getrange)) {
err = -EINVAL;
goto fail;
}
- addr = (void *)(unsigned long)(desc.addr + range.offset);
+ addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
+ range.offset);
if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
err = resize_iovec(iov, gfp);
@@ -353,20 +409,23 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov->iov[iov->used].iov_len = len;
iov->used++;
- if (unlikely(len != desc.len)) {
- desc.len -= len;
- desc.addr += len;
+ if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
+ desc.len = cpu_to_vringh32(vrh,
+ vringh32_to_cpu(vrh, desc.len) - len);
+ desc.addr = cpu_to_vringh64(vrh,
+ vringh64_to_cpu(vrh, desc.addr) + len);
goto again;
}
- if (desc.flags & VRING_DESC_F_NEXT) {
- i = desc.next;
+ if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
+ i = vringh16_to_cpu(vrh, desc.next);
} else {
/* Just in case we need to finish traversing above. */
if (unlikely(up_next > 0)) {
i = return_from_indirect(vrh, &up_next,
&descs, &desc_max);
slow = false;
+ indirect_count = 0;
} else
break;
}
@@ -387,8 +446,10 @@ fail:
static inline int __vringh_complete(struct vringh *vrh,
const struct vring_used_elem *used,
unsigned int num_used,
- int (*putu16)(u16 *p, u16 val),
- int (*putused)(struct vring_used_elem *dst,
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val),
+ int (*putused)(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem
*src, unsigned num))
{
@@ -404,12 +465,12 @@ static inline int __vringh_complete(struct vringh *vrh,
/* Compiler knows num_used == 1 sometimes, hence extra check */
if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
u16 part = vrh->vring.num - off;
- err = putused(&used_ring->ring[off], used, part);
+ err = putused(vrh, &used_ring->ring[off], used, part);
if (!err)
- err = putused(&used_ring->ring[0], used + part,
+ err = putused(vrh, &used_ring->ring[0], used + part,
num_used - part);
} else
- err = putused(&used_ring->ring[off], used, num_used);
+ err = putused(vrh, &used_ring->ring[off], used, num_used);
if (err) {
vringh_bad("Failed to write %u used entries %u at %p",
@@ -420,7 +481,7 @@ static inline int __vringh_complete(struct vringh *vrh,
/* Make sure buffer is written before we update index. */
virtio_wmb(vrh->weak_barriers);
- err = putu16(&vrh->vring.used->idx, used_idx + num_used);
+ err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
if (err) {
vringh_bad("Failed to update used index at %p",
&vrh->vring.used->idx);
@@ -433,7 +494,9 @@ static inline int __vringh_complete(struct vringh *vrh,
static inline int __vringh_need_notify(struct vringh *vrh,
- int (*getu16)(u16 *val, const u16 *p))
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val,
+ const __virtio16 *p))
{
bool notify;
u16 used_event;
@@ -447,7 +510,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
/* Old-style, without event indices. */
if (!vrh->event_indices) {
u16 flags;
- err = getu16(&flags, &vrh->vring.avail->flags);
+ err = getu16(vrh, &flags, &vrh->vring.avail->flags);
if (err) {
vringh_bad("Failed to get flags at %p",
&vrh->vring.avail->flags);
@@ -457,7 +520,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
}
/* Modern: we know when other side wants to know. */
- err = getu16(&used_event, &vring_used_event(&vrh->vring));
+ err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
if (err) {
vringh_bad("Failed to get used event idx at %p",
&vring_used_event(&vrh->vring));
@@ -478,20 +541,22 @@ static inline int __vringh_need_notify(struct vringh *vrh,
}
static inline bool __vringh_notify_enable(struct vringh *vrh,
- int (*getu16)(u16 *val, const u16 *p),
- int (*putu16)(u16 *p, u16 val))
+ int (*getu16)(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p),
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val))
{
u16 avail;
if (!vrh->event_indices) {
/* Old-school; update flags. */
- if (putu16(&vrh->vring.used->flags, 0) != 0) {
+ if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
vringh_bad("Clearing used flags %p",
&vrh->vring.used->flags);
return true;
}
} else {
- if (putu16(&vring_avail_event(&vrh->vring),
+ if (putu16(vrh, &vring_avail_event(&vrh->vring),
vrh->last_avail_idx) != 0) {
vringh_bad("Updating avail event index %p",
&vring_avail_event(&vrh->vring));
@@ -503,7 +568,7 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
* sure it's written, then check again. */
virtio_mb(vrh->weak_barriers);
- if (getu16(&avail, &vrh->vring.avail->idx) != 0) {
+ if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
vringh_bad("Failed to check avail idx at %p",
&vrh->vring.avail->idx);
return true;
@@ -516,11 +581,13 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
}
static inline void __vringh_notify_disable(struct vringh *vrh,
- int (*putu16)(u16 *p, u16 val))
+ int (*putu16)(const struct vringh *vrh,
+ __virtio16 *p, u16 val))
{
if (!vrh->event_indices) {
/* Old-school; update flags. */
- if (putu16(&vrh->vring.used->flags, VRING_USED_F_NO_NOTIFY)) {
+ if (putu16(vrh, &vrh->vring.used->flags,
+ VRING_USED_F_NO_NOTIFY)) {
vringh_bad("Setting used flags %p",
&vrh->vring.used->flags);
}
@@ -528,23 +595,29 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
}
/* Userspace access helpers: in this case, addresses are really userspace. */
-static inline int getu16_user(u16 *val, const u16 *p)
+static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
- return get_user(*val, (__force u16 __user *)p);
+ __virtio16 v = 0;
+ int rc = get_user(v, (__force __virtio16 __user *)p);
+ *val = vringh16_to_cpu(vrh, v);
+ return rc;
}
-static inline int putu16_user(u16 *p, u16 val)
+static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
- return put_user(val, (__force u16 __user *)p);
+ __virtio16 v = cpu_to_vringh16(vrh, val);
+ return put_user(v, (__force __virtio16 __user *)p);
}
-static inline int copydesc_user(void *dst, const void *src, size_t len)
+static inline int copydesc_user(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
-static inline int putused_user(struct vring_used_elem *dst,
+static inline int putused_user(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
@@ -552,13 +625,15 @@ static inline int putused_user(struct vring_used_elem *dst,
sizeof(*dst) * num) ? -EFAULT : 0;
}
-static inline int xfer_from_user(void *src, void *dst, size_t len)
+static inline int xfer_from_user(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
-static inline int xfer_to_user(void *dst, void *src, size_t len)
+static inline int xfer_to_user(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
{
return copy_to_user((__force void __user *)dst, src, len) ?
-EFAULT : 0;
@@ -570,18 +645,18 @@ static inline int xfer_to_user(void *dst, void *src, size_t len)
* @features: the feature bits for this ring.
* @num: the number of elements.
* @weak_barriers: true if we only need memory barriers, not I/O.
- * @desc: the userpace descriptor pointer.
- * @avail: the userpace avail pointer.
- * @used: the userpace used pointer.
+ * @desc: the userspace descriptor pointer.
+ * @avail: the userspace avail pointer.
+ * @used: the userspace used pointer.
*
* Returns an error if num is invalid: you should check pointers
* yourself!
*/
-int vringh_init_user(struct vringh *vrh, u32 features,
+int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used)
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used)
{
/* Sane power of 2 please! */
if (!num || num > 0xffff || (num & (num - 1))) {
@@ -589,6 +664,7 @@ int vringh_init_user(struct vringh *vrh, u32 features,
return -EINVAL;
}
+ vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
vrh->weak_barriers = weak_barriers;
vrh->completed = 0;
@@ -618,7 +694,10 @@ EXPORT_SYMBOL(vringh_init_user);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_iov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_user(struct vringh *vrh,
struct vringh_iov *riov,
@@ -679,7 +758,7 @@ EXPORT_SYMBOL(vringh_getdesc_user);
*/
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
- return vringh_iov_xfer((struct vringh_kiov *)riov,
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);
@@ -687,7 +766,7 @@ EXPORT_SYMBOL(vringh_iov_pull_user);
/**
* vringh_iov_push_user - copy bytes into vring_iov.
* @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
* @len: the maximum length to copy.
*
* Returns the bytes copied <= len or a negative errno.
@@ -695,28 +774,12 @@ EXPORT_SYMBOL(vringh_iov_pull_user);
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
const void *src, size_t len)
{
- return vringh_iov_xfer((struct vringh_kiov *)wiov,
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
(void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
/**
- * vringh_abandon_user - we've decided not to handle the descriptor(s).
- * @vrh: the vring.
- * @num: the number of descriptors to put back (ie. num
- * vringh_get_user() to undo).
- *
- * The next vringh_get_user() will return the old descriptor(s) again.
- */
-void vringh_abandon_user(struct vringh *vrh, unsigned int num)
-{
- /* We only update vring_avail_event(vr) when we want to be notified,
- * so we haven't changed that yet. */
- vrh->last_avail_idx -= num;
-}
-EXPORT_SYMBOL(vringh_abandon_user);
-
-/**
* vringh_complete_user - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_user.
@@ -729,8 +792,8 @@ int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
struct vring_used_elem used;
- used.id = head;
- used.len = len;
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);
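Taken together, the userspace-memory entry points form a get/pull/complete/notify cycle. A hedged sketch of one iteration, assuming a caller-supplied my_getrange() address validator and a notify_guest() helper (both hypothetical, error handling abbreviated):

	struct vringh_iov riov, wiov;
	u16 head;
	int err;

	vringh_iov_init(&riov, NULL, 0);
	vringh_iov_init(&wiov, NULL, 0);

	err = vringh_getdesc_user(vrh, &riov, &wiov, my_getrange, &head);
	if (err == 1) {				/* 0 means the ring was empty */
		char buf[64];
		ssize_t len = vringh_iov_pull_user(&riov, buf, sizeof(buf));

		if (len >= 0 &&
		    vringh_complete_user(vrh, head, 0) == 0 &&
		    vringh_need_notify_user(vrh) > 0)
			notify_guest();		/* e.g. signal an eventfd */
	}
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);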
@@ -792,25 +855,28 @@ int vringh_need_notify_user(struct vringh *vrh)
EXPORT_SYMBOL(vringh_need_notify_user);
/* Kernelspace access helpers. */
-static inline int getu16_kern(u16 *val, const u16 *p)
+static inline int getu16_kern(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p)
{
- *val = ACCESS_ONCE(*p);
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
return 0;
}
-static inline int putu16_kern(u16 *p, u16 val)
+static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
- ACCESS_ONCE(*p) = val;
+ WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
return 0;
}
-static inline int copydesc_kern(void *dst, const void *src, size_t len)
+static inline int copydesc_kern(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
{
memcpy(dst, src, len);
return 0;
}
-static inline int putused_kern(struct vring_used_elem *dst,
+static inline int putused_kern(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
@@ -818,25 +884,19 @@ static inline int putused_kern(struct vring_used_elem *dst,
return 0;
}
-static inline int xfer_kern(void *src, void *dst, size_t len)
-{
- memcpy(dst, src, len);
- return 0;
-}
-
/**
* vringh_init_kern - initialize a vringh for a kernelspace vring.
* @vrh: the vringh to initialize.
* @features: the feature bits for this ring.
* @num: the number of elements.
* @weak_barriers: true if we only need memory barriers, not I/O.
- * @desc: the userpace descriptor pointer.
- * @avail: the userpace avail pointer.
- * @used: the userpace used pointer.
+ * @desc: the kernelspace descriptor pointer.
+ * @avail: the kernelspace avail pointer.
+ * @used: the kernelspace used pointer.
*
* Returns an error if num is invalid.
*/
-int vringh_init_kern(struct vringh *vrh, u32 features,
+int vringh_init_kern(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
@@ -848,6 +908,7 @@ int vringh_init_kern(struct vringh *vrh, u32 features,
return -EINVAL;
}
+ vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
vrh->weak_barriers = weak_barriers;
vrh->completed = 0;
@@ -876,7 +937,10 @@ EXPORT_SYMBOL(vringh_init_kern);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_kern(struct vringh *vrh,
struct vringh_kiov *riov,
@@ -905,51 +969,6 @@ int vringh_getdesc_kern(struct vringh *vrh,
EXPORT_SYMBOL(vringh_getdesc_kern);
/**
- * vringh_iov_pull_kern - copy bytes from vring_iov.
- * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
- * @dst: the place to copy.
- * @len: the maximum length to copy.
- *
- * Returns the bytes copied <= len or a negative errno.
- */
-ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
-{
- return vringh_iov_xfer(riov, dst, len, xfer_kern);
-}
-EXPORT_SYMBOL(vringh_iov_pull_kern);
-
-/**
- * vringh_iov_push_kern - copy bytes into vring_iov.
- * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
- * @dst: the place to copy.
- * @len: the maximum length to copy.
- *
- * Returns the bytes copied <= len or a negative errno.
- */
-ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
- const void *src, size_t len)
-{
- return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
-}
-EXPORT_SYMBOL(vringh_iov_push_kern);
-
-/**
- * vringh_abandon_kern - we've decided not to handle the descriptor(s).
- * @vrh: the vring.
- * @num: the number of descriptors to put back (ie. num
- * vringh_get_kern() to undo).
- *
- * The next vringh_get_kern() will return the old descriptor(s) again.
- */
-void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
-{
- /* We only update vring_avail_event(vr) when we want to be notified,
- * so we haven't changed that yet. */
- vrh->last_avail_idx -= num;
-}
-EXPORT_SYMBOL(vringh_abandon_kern);
-
-/**
* vringh_complete_kern - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_kern.
@@ -962,8 +981,8 @@ int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
struct vring_used_elem used;
- used.id = head;
- used.len = len;
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
@@ -1007,4 +1026,474 @@ int vringh_need_notify_kern(struct vringh *vrh)
}
EXPORT_SYMBOL(vringh_need_notify_kern);
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+
+struct iotlb_vec {
+ union {
+ struct iovec *iovec;
+ struct bio_vec *bvec;
+ } iov;
+ size_t count;
+};
+
+static int iotlb_translate(const struct vringh *vrh,
+ u64 addr, u64 len, u64 *translated,
+ struct iotlb_vec *ivec, u32 perm)
+{
+ struct vhost_iotlb_map *map;
+ struct vhost_iotlb *iotlb = vrh->iotlb;
+ int ret = 0;
+ u64 s = 0, last = addr + len - 1;
+
+ spin_lock(vrh->iotlb_lock);
+
+ while (len > s) {
+ uintptr_t io_addr;
+ size_t io_len;
+ u64 size;
+
+ if (unlikely(ret >= ivec->count)) {
+ ret = -ENOBUFS;
+ break;
+ }
+
+ map = vhost_iotlb_itree_first(iotlb, addr, last);
+ if (!map || map->start > addr) {
+ ret = -EINVAL;
+ break;
+ } else if (!(map->perm & perm)) {
+ ret = -EPERM;
+ break;
+ }
+
+ size = map->size - addr + map->start;
+ io_len = min(len - s, size);
+ io_addr = map->addr - map->start + addr;
+
+ if (vrh->use_va) {
+ struct iovec *iovec = ivec->iov.iovec;
+
+ iovec[ret].iov_len = io_len;
+ iovec[ret].iov_base = (void __user *)io_addr;
+ } else {
+ u64 pfn = io_addr >> PAGE_SHIFT;
+ struct bio_vec *bvec = ivec->iov.bvec;
+
+ bvec_set_page(&bvec[ret], pfn_to_page(pfn), io_len,
+ io_addr & (PAGE_SIZE - 1));
+ }
+
+ s += size;
+ addr += size;
+ ++ret;
+ }
+
+ spin_unlock(vrh->iotlb_lock);
+
+ if (translated)
+ *translated = min(len, s);
+
+ return ret;
+}
+
+#define IOTLB_IOV_STRIDE 16
+
+static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ struct iotlb_vec ivec;
+ union {
+ struct iovec iovec[IOTLB_IOV_STRIDE];
+ struct bio_vec bvec[IOTLB_IOV_STRIDE];
+ } iov;
+ u64 total_translated = 0;
+
+ ivec.iov.iovec = iov.iovec;
+ ivec.count = IOTLB_IOV_STRIDE;
+
+ while (total_translated < len) {
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+ size_t size;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len - total_translated, &translated,
+ &ivec, VHOST_MAP_RO);
+ if (ret == -ENOBUFS)
+ ret = IOTLB_IOV_STRIDE;
+ else if (ret < 0)
+ return ret;
+
+ if (vrh->use_va) {
+ iov_iter_init(&iter, ITER_SOURCE, ivec.iov.iovec, ret,
+ translated);
+ } else {
+ iov_iter_bvec(&iter, ITER_SOURCE, ivec.iov.bvec, ret,
+ translated);
+ }
+
+ size = copy_from_iter(dst, translated, &iter);
+ if (size != translated)
+ return -EFAULT;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
+}
+
+static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ struct iotlb_vec ivec;
+ union {
+ struct iovec iovec[IOTLB_IOV_STRIDE];
+ struct bio_vec bvec[IOTLB_IOV_STRIDE];
+ } iov;
+ u64 total_translated = 0;
+
+ ivec.iov.iovec = iov.iovec;
+ ivec.count = IOTLB_IOV_STRIDE;
+
+ while (total_translated < len) {
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+ size_t size;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len - total_translated, &translated,
+ &ivec, VHOST_MAP_WO);
+ if (ret == -ENOBUFS)
+ ret = IOTLB_IOV_STRIDE;
+ else if (ret < 0)
+ return ret;
+
+ if (vrh->use_va) {
+ iov_iter_init(&iter, ITER_DEST, ivec.iov.iovec, ret,
+ translated);
+ } else {
+ iov_iter_bvec(&iter, ITER_DEST, ivec.iov.bvec, ret,
+ translated);
+ }
+
+ size = copy_to_iter(src, translated, &iter);
+ if (size != translated)
+ return -EFAULT;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
+}
+
+static inline int getu16_iotlb(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p)
+{
+ struct iotlb_vec ivec;
+ union {
+ struct iovec iovec[1];
+ struct bio_vec bvec[1];
+ } iov;
+ __virtio16 tmp;
+ int ret;
+
+ ivec.iov.iovec = iov.iovec;
+ ivec.count = 1;
+
+ /* Atomic read is needed for getu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ NULL, &ivec, VHOST_MAP_RO);
+ if (ret < 0)
+ return ret;
+
+ if (vrh->use_va) {
+ ret = __get_user(tmp, (__virtio16 __user *)ivec.iov.iovec[0].iov_base);
+ if (ret)
+ return ret;
+ } else {
+ __virtio16 *from = bvec_kmap_local(&ivec.iov.bvec[0]);
+
+ tmp = READ_ONCE(*from);
+ kunmap_local(from);
+ }
+
+ *val = vringh16_to_cpu(vrh, tmp);
+
+ return 0;
+}
+
+static inline int putu16_iotlb(const struct vringh *vrh,
+ __virtio16 *p, u16 val)
+{
+ struct iotlb_vec ivec;
+ union {
+ struct iovec iovec;
+ struct bio_vec bvec;
+ } iov;
+ __virtio16 tmp;
+ int ret;
+
+ ivec.iov.iovec = &iov.iovec;
+ ivec.count = 1;
+
+ /* Atomic write is needed for putu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ NULL, &ivec, VHOST_MAP_RO);
+ if (ret < 0)
+ return ret;
+
+ tmp = cpu_to_vringh16(vrh, val);
+
+ if (vrh->use_va) {
+ ret = __put_user(tmp, (__virtio16 __user *)ivec.iov.iovec[0].iov_base);
+ if (ret)
+ return ret;
+ } else {
+ __virtio16 *to = bvec_kmap_local(&ivec.iov.bvec[0]);
+
+ WRITE_ONCE(*to, tmp);
+ kunmap_local(to);
+ }
+
+ return 0;
+}
+
+static inline int copydesc_iotlb(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, (void *)src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_to_iotlb(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int putused_iotlb(const struct vringh *vrh,
+ struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ int size = num * sizeof(*dst);
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, (void *)src, size);
+ if (ret != size)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the descriptor pointer (an IOTLB address).
+ * @avail: the avail pointer (an IOTLB address).
+ * @used: the used pointer (an IOTLB address).
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_iotlb(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used)
+{
+ vrh->use_va = false;
+
+ return vringh_init_kern(vrh, features, num, weak_barriers,
+ desc, avail, used);
+}
+EXPORT_SYMBOL(vringh_init_iotlb);
+
+/**
+ * vringh_init_iotlb_va - initialize a vringh for a ring with IOTLB containing
+ * user VA.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the descriptor pointer (an IOTLB address).
+ * @avail: the avail pointer (an IOTLB address).
+ * @used: the used pointer (an IOTLB address).
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_iotlb_va(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used)
+{
+ vrh->use_va = true;
+
+ return vringh_init_kern(vrh, features, num, weak_barriers,
+ desc, avail, used);
+}
+EXPORT_SYMBOL(vringh_init_iotlb_va);
+
+/**
+ * vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vring
+ * @iotlb: iotlb associated with this vring
+ * @iotlb_lock: spinlock to synchronize the iotlb accesses
+ */
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
+ spinlock_t *iotlb_lock)
+{
+ vrh->iotlb = iotlb;
+ vrh->iotlb_lock = iotlb_lock;
+}
+EXPORT_SYMBOL(vringh_set_iotlb);
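Attaching an IOTLB therefore takes three steps: allocate it, add at least one mapping, then hand it to an initialized vringh. A sketch under those assumptions; the limit, addresses and lock are illustrative, loosely modeled on a vDPA-simulator-style user:

	/* File scope: serializes IOTLB updates (hypothetical lock). */
	static DEFINE_SPINLOCK(my_iotlb_lock);

	/* In the device-setup path; 'addr', 'features', 'num' and the
	 * *_iova values are placeholders for driver-specific data.
	 */
	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);
	struct vringh vrh;

	/* Map IOVA 0x0..0xfffff to host address 'addr', read/write. */
	vhost_iotlb_add_range(iotlb, 0x0, 0xfffff, addr, VHOST_MAP_RW);

	vringh_init_iotlb(&vrh, features, num, false,
			  (struct vring_desc *)desc_iova,
			  (struct vring_avail *)avail_iova,
			  (struct vring_used *)used_iova);
	vringh_set_iotlb(&vrh, iotlb, &my_iotlb_lock);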
+
+/**
+ * vringh_getdesc_iotlb - get next available descriptor from ring with
+ * IOTLB.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_iotlb().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
+ */
+int vringh_getdesc_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp)
+{
+ int err;
+
+ err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+ gfp, copydesc_iotlb);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_iotlb);
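A typical consumer then drains the ring in a loop: fetch a chain, process it, publish it, and notify only when asked. A hedged sketch for read-only chains; process_request() and send_interrupt() stand in for driver-specific work:

	struct vringh_kiov riov;
	u16 head;
	int err;

	vringh_kiov_init(&riov, NULL, 0);

	while ((err = vringh_getdesc_iotlb(vrh, &riov, NULL, &head,
					   GFP_ATOMIC)) == 1) {
		process_request(vrh, &riov);		/* hypothetical */
		vringh_complete_iotlb(vrh, head, 0);	/* wrote no bytes */
	}

	if (err == 0 && vringh_need_notify_iotlb(vrh) > 0)
		send_interrupt();			/* hypothetical */

	vringh_kiov_cleanup(&riov);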
+
+/**
+ * vringh_iov_pull_iotlb - copy bytes from vring_iov.
+ * @vrh: the vring.
+ * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ void *dst, size_t len)
+{
+ return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_pull_iotlb);
+
+/**
+ * vringh_iov_push_iotlb - copy bytes into vring_iov.
+ * @vrh: the vring.
+ * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
+ struct vringh_kiov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_push_iotlb);
+
+/**
+ * vringh_complete_iotlb - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_iotlb.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_iotlb() after one or more calls
+ * to this function.
+ */
+int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
+
+ return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
+}
+EXPORT_SYMBOL(vringh_complete_iotlb);
+
+/**
+ * vringh_need_notify_iotlb - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_iotlb() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_iotlb(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_need_notify_iotlb);
+
+#endif
+
+MODULE_DESCRIPTION("host side of a virtio ring");
MODULE_LICENSE("GPL");
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
new file mode 100644
index 000000000000..0298ddc34824
--- /dev/null
+++ b/drivers/vhost/vsock.c
@@ -0,0 +1,966 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vhost transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ */
+#include <linux/miscdevice.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <net/sock.h>
+#include <linux/virtio_vsock.h>
+#include <linux/vhost.h>
+#include <linux/hashtable.h>
+
+#include <net/af_vsock.h>
+#include "vhost.h"
+
+#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
+
+#define VHOST_VSOCK_FEATURES (VHOST_FEATURES | \
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \
+ (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+};
+
+/* Used to track all the vhost_vsock instances on the system. */
+static DEFINE_MUTEX(vhost_vsock_mutex);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
+
+struct vhost_vsock {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[2];
+
+ /* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
+ struct hlist_node hash;
+
+ struct vhost_work send_pkt_work;
+ struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
+
+ atomic_t queued_replies;
+
+ u32 guest_cid;
+ bool seqpacket_allow;
+};
+
+static u32 vhost_transport_get_local_cid(void)
+{
+ return VHOST_VSOCK_DEFAULT_HOST_CID;
+}
+
+/* Callers that dereference the return value must hold vhost_vsock_mutex or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ u32 other_cid = vsock->guest_cid;
+
+ /* Skip instances that have no CID yet */
+ if (other_cid == 0)
+ continue;
+
+ if (other_cid == guest_cid)
+ return vsock;
+
+ }
+
+ return NULL;
+}
+
+static void
+vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int pkts = 0, total_len = 0;
+ bool added = false;
+ bool restart_tx = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_get_backend(vq))
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
+ /* Avoid further vmexits, we're already processing the virtqueue */
+ vhost_disable_notify(&vsock->dev, vq);
+
+ do {
+ struct virtio_vsock_hdr *hdr;
+ size_t iov_len, payload_len;
+ struct iov_iter iov_iter;
+ u32 flags_to_restore = 0;
+ struct sk_buff *skb;
+ unsigned out, in;
+ size_t nbytes;
+ u32 offset;
+ int head;
+
+ skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
+
+ if (!skb) {
+ vhost_enable_notify(&vsock->dev, vq);
+ break;
+ }
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0) {
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ break;
+ }
+
+ if (head == vq->num) {
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ /* We cannot finish yet if more buffers snuck in while
+ * re-enabling notify.
+ */
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ if (out) {
+ kfree_skb(skb);
+ vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+ break;
+ }
+
+ iov_len = iov_length(&vq->iov[out], in);
+ if (iov_len < sizeof(*hdr)) {
+ kfree_skb(skb);
+ vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
+ break;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
+ offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
+ payload_len = skb->len - offset;
+ hdr = virtio_vsock_hdr(skb);
+
+ /* If the packet is greater than the space available in the
+ * buffer, we split it using multiple buffers.
+ */
+ if (payload_len > iov_len - sizeof(*hdr)) {
+ payload_len = iov_len - sizeof(*hdr);
+
+ /* As we are copying pieces of a large packet's buffer into
+ * small rx buffers, the headers of the packets in the rx queue
+ * are created dynamically and initialized from the current
+ * packet's header (except for the length). But for
+ * SOCK_SEQPACKET we must also clear the message delimiter bit
+ * (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
+ * (VIRTIO_VSOCK_SEQ_EOR) if set; otherwise we would emit a
+ * sequence of packets all carrying these bits. Once the
+ * initialized header has been copied to the rx buffer, the
+ * required bits are restored.
+ */
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+ }
+ }
+ }
+
+ /* Set the correct length in the header */
+ hdr->len = cpu_to_le32(payload_len);
+
+ nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
+ kfree_skb(skb);
+ vq_err(vq, "Faulted on copying pkt hdr\n");
+ break;
+ }
+
+ if (skb_copy_datagram_iter(skb,
+ offset,
+ &iov_iter,
+ payload_len)) {
+ kfree_skb(skb);
+ vq_err(vq, "Faulted on copying pkt buf\n");
+ break;
+ }
+
+ /* Deliver to monitoring devices all packets that we
+ * will transmit.
+ */
+ virtio_transport_deliver_tap_pkt(skb);
+
+ vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
+ added = true;
+
+ VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
+ total_len += payload_len;
+
+ /* If we didn't send all the payload we can requeue the packet
+ * to send it with the next available buffer.
+ */
+ if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
+ hdr->flags |= cpu_to_le32(flags_to_restore);
+
+ /* We are queueing the same skb to handle
+ * the remaining bytes, and we want to deliver it
+ * to monitoring devices in the next iteration.
+ */
+ virtio_vsock_skb_clear_tap_delivered(skb);
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ } else {
+ if (virtio_vsock_skb_reply(skb)) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we have resources to resume tx
+ * processing?
+ */
+ if (val + 1 == tx_vq->num)
+ restart_tx = true;
+ }
+
+ virtio_transport_consume_skb_sent(skb, true);
+ }
+ } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+
+ if (restart_tx)
+ vhost_poll_queue(&tx_vq->poll);
+}
+
+static void vhost_transport_send_pkt_work(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_vsock *vsock;
+
+ vsock = container_of(work, struct vhost_vsock, send_pkt_work);
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int
+vhost_transport_send_pkt(struct sk_buff *skb)
+{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vhost_vsock *vsock;
+ int len = skb->len;
+
+ rcu_read_lock();
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+ if (!vsock) {
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ if (virtio_vsock_skb_reply(skb))
+ atomic_inc(&vsock->queued_replies);
+
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
+
+ rcu_read_unlock();
+ return len;
+}
+
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ struct vhost_vsock *vsock;
+ int cnt = 0;
+ int ret = -ENODEV;
+
+ rcu_read_lock();
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ if (!vsock)
+ goto out;
+
+ cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
+
+ if (cnt) {
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int new_cnt;
+
+ new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+ if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+ vhost_poll_queue(&tx_vq->poll);
+ }
+
+ ret = 0;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct sk_buff *
+vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
+ unsigned int out, unsigned int in)
+{
+ struct virtio_vsock_hdr *hdr;
+ struct iov_iter iov_iter;
+ struct sk_buff *skb;
+ size_t payload_len;
+ size_t nbytes;
+ size_t len;
+
+ if (in != 0) {
+ vq_err(vq, "Expected 0 input buffers, got %u\n", in);
+ return NULL;
+ }
+
+ len = iov_length(vq->iov, out);
+
+ if (len < VIRTIO_VSOCK_SKB_HEADROOM ||
+ len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
+ return NULL;
+
+ /* len contains both payload and hdr */
+ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
+
+ hdr = virtio_vsock_hdr(skb);
+ nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
+ vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+ sizeof(*hdr), nbytes);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ payload_len = le32_to_cpu(hdr->len);
+
+ /* No payload */
+ if (!payload_len)
+ return skb;
+
+ /* The pkt is too big or the length in the header is invalid */
+ if (payload_len + sizeof(*hdr) > len) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ virtio_vsock_skb_put(skb, payload_len);
+
+ if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
+ vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ return skb;
+}
+
+/* Is there space left for replies to rx packets? */
+static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
+
+ return val < vq->num;
+}
+
+static bool vhost_transport_msgzerocopy_allow(void)
+{
+ return true;
+}
+
+static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .module = THIS_MODULE,
+
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
+ .seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
+ .seqpacket_allow = vhost_transport_seqpacket_allow,
+ .seqpacket_has_data = virtio_transport_seqpacket_has_data,
+
+ .msgzerocopy_allow = vhost_transport_msgzerocopy_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
+ .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+
+ .unsent_bytes = virtio_transport_unsent_bytes,
+
+ .read_skb = virtio_transport_read_skb,
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
+static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+{
+ struct vhost_vsock *vsock;
+ bool seqpacket_allow = false;
+
+ rcu_read_lock();
+ vsock = vhost_vsock_get(remote_cid);
+
+ if (vsock)
+ seqpacket_allow = vsock->seqpacket_allow;
+
+ rcu_read_unlock();
+
+ return seqpacket_allow;
+}
+
+static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+ int head, pkts = 0, total_len = 0;
+ unsigned int out, in;
+ struct sk_buff *skb;
+ bool added = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_get_backend(vq))
+ goto out;
+
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
+ vhost_disable_notify(&vsock->dev, vq);
+ do {
+ struct virtio_vsock_hdr *hdr;
+
+ if (!vhost_vsock_more_replies(vsock)) {
+ /* Stop tx until the device processes already
+ * pending replies. Leave tx virtqueue
+ * callbacks disabled.
+ */
+ goto no_more_replies;
+ }
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0)
+ break;
+
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ skb = vhost_vsock_alloc_skb(vq, out, in);
+ if (!skb) {
+ vq_err(vq, "Faulted on pkt\n");
+ continue;
+ }
+
+ total_len += sizeof(*hdr) + skb->len;
+
+ /* Deliver to monitoring devices all received packets */
+ virtio_transport_deliver_tap_pkt(skb);
+
+ hdr = virtio_vsock_hdr(skb);
+
+ /* Only accept correctly addressed packets */
+ if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
+ le64_to_cpu(hdr->dst_cid) ==
+ vhost_transport_get_local_cid())
+ virtio_transport_recv_pkt(&vhost_transport, skb);
+ else
+ kfree_skb(skb);
+
+ vhost_add_used(vq, head, 0);
+ added = true;
+ } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+
+no_more_replies:
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int vhost_vsock_start(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq;
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_access_ok(vq)) {
+ ret = -EFAULT;
+ goto err_vq;
+ }
+
+ if (!vhost_vq_get_backend(vq)) {
+ vhost_vq_set_backend(vq, vsock);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
+ }
+
+ mutex_unlock(&vq->mutex);
+ }
+
+ /* Some packets may have been queued before the device was started,
+ * let's kick the send worker to send them.
+ */
+ vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
+
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err_vq:
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+ }
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
+{
+ size_t i;
+ int ret = 0;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ if (check_owner) {
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vhost_vq_set_backend(vq, NULL);
+ mutex_unlock(&vq->mutex);
+ }
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static void vhost_vsock_free(struct vhost_vsock *vsock)
+{
+ kvfree(vsock);
+}
+
+static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+{
+ struct vhost_virtqueue **vqs;
+ struct vhost_vsock *vsock;
+ int ret;
+
+ /* This struct is large and the allocation could fail; fall back to
+ * vmalloc if there is no other way.
+ */
+ vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!vsock)
+ return -ENOMEM;
+
+ vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsock->guest_cid = 0; /* no CID assigned yet */
+ vsock->seqpacket_allow = false;
+
+ atomic_set(&vsock->queued_replies, 0);
+
+ vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+ vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
+ vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+ vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+ UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
+ VHOST_VSOCK_WEIGHT, true, NULL);
+
+ file->private_data = vsock;
+ skb_queue_head_init(&vsock->send_pkt_queue);
+ vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+ return 0;
+
+out:
+ vhost_vsock_free(vsock);
+ return ret;
+}
+
+static void vhost_vsock_flush(struct vhost_vsock *vsock)
+{
+ vhost_dev_flush(&vsock->dev);
+}
+
+static void vhost_vsock_reset_orphans(struct sock *sk)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
+ /* If the peer is still valid, no need to reset connection */
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ return;
+
+ /* If the close timeout is pending, let it expire. This avoids races
+ * with the timeout callback.
+ */
+ if (vsk->close_work_scheduled)
+ return;
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk_error_report(sk);
+}
+
+static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+{
+ struct vhost_vsock *vsock = file->private_data;
+
+ mutex_lock(&vhost_vsock_mutex);
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+ mutex_unlock(&vhost_vsock_mutex);
+
+ /* Wait for other CPUs to finish using vsock */
+ synchronize_rcu();
+
+ /* Iterating over all connections for all CIDs to find orphans is
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(&vhost_transport.transport,
+ vhost_vsock_reset_orphans);
+
+ /* Don't check the owner: we are in the release path, so we must stop
+ * the vsock device in any case. vhost_vsock_stop() cannot fail here,
+ * so there is no need to check its return code.
+ */
+ vhost_vsock_stop(vsock, false);
+ vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+ virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
+
+ vhost_dev_cleanup(&vsock->dev);
+ kfree(vsock->dev.vqs);
+ vhost_vsock_free(vsock);
+ return 0;
+}
+
+static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+{
+ struct vhost_vsock *other;
+
+ /* Refuse reserved CIDs */
+ if (guest_cid <= VMADDR_CID_HOST ||
+ guest_cid == U32_MAX)
+ return -EINVAL;
+
+ /* 64-bit CIDs are not yet supported */
+ if (guest_cid > U32_MAX)
+ return -EINVAL;
+
+ /* Refuse if CID is assigned to the guest->host transport (i.e. nested
+ * VM), to make the loopback work.
+ */
+ if (vsock_find_cid(guest_cid))
+ return -EADDRINUSE;
+
+ /* Refuse if CID is already in use */
+ mutex_lock(&vhost_vsock_mutex);
+ other = vhost_vsock_get(guest_cid);
+ if (other && other != vsock) {
+ mutex_unlock(&vhost_vsock_mutex);
+ return -EADDRINUSE;
+ }
+
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+
+ vsock->guest_cid = guest_cid;
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
+ mutex_unlock(&vhost_vsock_mutex);
+
+ return 0;
+}
+
+static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ if (features & ~VHOST_VSOCK_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vsock->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vsock->dev)) {
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev))
+ goto err;
+ }
+
+ vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET);
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
+}
+
+static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_vsock *vsock = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 guest_cid;
+ u64 features;
+ int start;
+ int r;
+
+ switch (ioctl) {
+ case VHOST_VSOCK_SET_GUEST_CID:
+ if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
+ return -EFAULT;
+ return vhost_vsock_set_cid(vsock, guest_cid);
+ case VHOST_VSOCK_SET_RUNNING:
+ if (copy_from_user(&start, argp, sizeof(start)))
+ return -EFAULT;
+ if (start)
+ return vhost_vsock_start(vsock);
+ else
+ return vhost_vsock_stop(vsock, true);
+ case VHOST_GET_FEATURES:
+ features = VHOST_VSOCK_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
+ default:
+ mutex_lock(&vsock->dev.mutex);
+ r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
+ else
+ vhost_vsock_flush(vsock);
+ mutex_unlock(&vsock->dev.mutex);
+ return r;
+ }
+}
+
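From userspace these ioctls are driven in a fixed order: claim ownership, configure the CID and the vrings, then flip VHOST_VSOCK_SET_RUNNING. A hedged bring-up sketch with error handling and the VHOST_SET_VRING_* queue setup elided:

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	int fd = open("/dev/vhost-vsock", O_RDWR);
	uint64_t cid = 3;	/* first CID above the reserved range */
	int running = 1;

	ioctl(fd, VHOST_SET_OWNER, NULL);
	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
	/* ... VHOST_SET_VRING_NUM/ADDR/KICK/CALL for tx and rx ... */
	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);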
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
+static const struct file_operations vhost_vsock_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vsock_dev_open,
+ .release = vhost_vsock_dev_release,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = vhost_vsock_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
+};
+
+static struct miscdevice vhost_vsock_misc = {
+ .minor = VHOST_VSOCK_MINOR,
+ .name = "vhost-vsock",
+ .fops = &vhost_vsock_fops,
+};
+
+static int __init vhost_vsock_init(void)
+{
+ int ret;
+
+ ret = vsock_core_register(&vhost_transport.transport,
+ VSOCK_TRANSPORT_F_H2G);
+ if (ret < 0)
+ return ret;
+
+ ret = misc_register(&vhost_vsock_misc);
+ if (ret) {
+ vsock_core_unregister(&vhost_transport.transport);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit vhost_vsock_exit(void)
+{
+ misc_deregister(&vhost_vsock_misc);
+ vsock_core_unregister(&vhost_transport.transport);
+}
+
+module_init(vhost_vsock_init);
+module_exit(vhost_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("vhost transport for vsock ");
+MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
+MODULE_ALIAS("devname:vhost-vsock");