Diffstat (limited to 'drivers/vdpa')
-rw-r--r--  drivers/vdpa/Kconfig                      |   8
-rw-r--r--  drivers/vdpa/alibaba/eni_vdpa.c           |   5
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c           |   5
-rw-r--r--  drivers/vdpa/mlx5/core/mr.c               |   4
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c         |  15
-rw-r--r--  drivers/vdpa/octeon_ep/octep_vdpa_main.c  |   6
-rw-r--r--  drivers/vdpa/pds/vdpa_dev.c               |   5
-rw-r--r--  drivers/vdpa/solidrun/snet_main.c         |   8
-rw-r--r--  drivers/vdpa/vdpa.c                       |   5
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c          |   4
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.c      | 134
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.h      |   7
-rw-r--r--  drivers/vdpa/vdpa_user/vduse_dev.c        |  79
-rw-r--r--  drivers/vdpa/virtio_pci/vp_vdpa.c         |   5
14 files changed, 174 insertions(+), 116 deletions(-)
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 559fb9d3271f..857cf288c876 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -34,13 +34,7 @@ config VDPA_SIM_BLOCK
config VDPA_USER
tristate "VDUSE (vDPA Device in Userspace) support"
- depends on EVENTFD && MMU && HAS_DMA
- #
- # This driver incorrectly tries to override the dma_ops. It should
- # never have done that, but for now keep it working on architectures
- # that use dma ops
- #
- depends on ARCH_HAS_DMA_OPS
+ depends on EVENTFD && MMU
select VHOST_IOTLB
select IOMMU_IOVA
help
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index ad7f3447fe90..e476504db0c8 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -478,7 +478,8 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
- dev, &eni_vdpa_ops, 1, 1, NULL, false);
+ dev, &eni_vdpa_ops, NULL,
+ 1, 1, NULL, false);
if (IS_ERR(eni_vdpa)) {
ENI_ERR(pdev, "failed to allocate vDPA structure\n");
return PTR_ERR(eni_vdpa);
@@ -496,7 +497,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
pci_set_drvdata(pdev, eni_vdpa);
- eni_vdpa->vdpa.dma_dev = &pdev->dev;
+ eni_vdpa->vdpa.vmap.dma_dev = &pdev->dev;
eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
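
Every vdpa_alloc_device() call site in this series gains one new argument, the map ops
pointer, inserted between the config ops and the group count; parent drivers that still
map through the DMA API simply pass NULL. A sketch of how the wrapper macro presumably
looks after the change (the exact body lives in include/linux/vdpa.h, outside this diff),
matching the __vdpa_alloc_device() prototype updated further down:

/* Sketch, not the verbatim upstream macro: vdpa_alloc_device() forwarding
 * the new map argument through to __vdpa_alloc_device(). */
#define vdpa_alloc_device(dev_struct, member, parent, config, map, \
			  ngroups, nas, name, use_va) \
	container_of(__vdpa_alloc_device( \
			parent, config, map, ngroups, nas, \
			sizeof(dev_struct) + \
			sizeof_field(dev_struct, member), \
			name, use_va), \
		     dev_struct, member)
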
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index ccf64d7bbfaa..6658dc74d915 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -705,7 +705,8 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
vf = &ifcvf_mgmt_dev->vf;
pdev = vf->pdev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+ &pdev->dev, &ifc_vdpa_ops,
+ NULL, 1, 1, NULL, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
@@ -713,7 +714,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
ifcvf_mgmt_dev->adapter = adapter;
adapter->pdev = pdev;
- adapter->vdpa.dma_dev = &pdev->dev;
+ adapter->vdpa.vmap.dma_dev = &pdev->dev;
adapter->vdpa.mdev = mdev;
adapter->vf = vf;
vdpa_dev = &adapter->vdpa;
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index c7a20278bc3c..8870a7169267 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -378,7 +378,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 pa, offset;
u64 paend;
struct scatterlist *sg;
- struct device *dma = mvdev->vdev.dma_dev;
+ struct device *dma = mvdev->vdev.vmap.dma_dev;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
@@ -432,7 +432,7 @@ err_map:
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
- struct device *dma = mvdev->vdev.dma_dev;
+ struct device *dma = mvdev->vdev.vmap.dma_dev;
destroy_direct_mr(mvdev, mr);
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 0ed2fc28e1ce..82034efb74fc 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3395,14 +3395,17 @@ static int mlx5_vdpa_reset_map(struct vdpa_device *vdev, unsigned int asid)
return err;
}
-static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
+static union virtio_map mlx5_get_vq_map(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ union virtio_map map;
if (is_ctrl_vq_idx(mvdev, idx))
- return &vdev->dev;
+ map.dma_dev = &vdev->dev;
+ else
+ map.dma_dev = mvdev->vdev.vmap.dma_dev;
- return mvdev->vdev.dma_dev;
+ return map;
}
static void free_irqs(struct mlx5_vdpa_net *ndev)
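
mlx5_get_vq_map() now returns the mapping token by value instead of a bare struct device
pointer. Judging from the two members this series touches (vmap.dma_dev in the PCI-backed
drivers, vmap.iova_domain in VDUSE below), the union presumably looks roughly like this;
the real definition sits outside this diff, in the virtio core headers:

/* Sketch of the union assumed by this patch (names taken from its users here). */
union virtio_map {
	/* Hardware-backed devices: map through the DMA API on this device. */
	struct device *dma_dev;
	/* VDUSE: map through the userspace IOVA domain (software IOTLB). */
	struct vduse_iova_domain *iova_domain;
};
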
@@ -3686,7 +3689,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_map = mlx5_vdpa_set_map,
.reset_map = mlx5_vdpa_reset_map,
.set_group_asid = mlx5_set_group_asid,
- .get_vq_dma_dev = mlx5_get_vq_dma_dev,
+ .get_vq_map = mlx5_get_vq_map,
.free = mlx5_vdpa_free,
.suspend = mlx5_vdpa_suspend,
.resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
@@ -3879,7 +3882,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops,
- MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
+ NULL, MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -3965,7 +3968,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev->mvdev.mlx_features = device_features;
- mvdev->vdev.dma_dev = &mdev->pdev->dev;
+ mvdev->vdev.vmap.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_alloc;
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_main.c b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
index 9b49efd24391..9e8d07078606 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_main.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
@@ -508,15 +508,15 @@ static int octep_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
u64 device_features;
int ret;
- oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops, 1, 1,
- NULL, false);
+ oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops,
+ NULL, 1, 1, NULL, false);
if (IS_ERR(oct_vdpa)) {
dev_err(&pdev->dev, "Failed to allocate vDPA structure for octep vdpa device");
return PTR_ERR(oct_vdpa);
}
oct_vdpa->pdev = pdev;
- oct_vdpa->vdpa.dma_dev = &pdev->dev;
+ oct_vdpa->vdpa.vmap.dma_dev = &pdev->dev;
oct_vdpa->vdpa.mdev = mdev;
oct_vdpa->oct_hw = oct_hw;
vdpa_dev = &oct_vdpa->vdpa;
diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
index 301d95e08596..36f61cc96e21 100644
--- a/drivers/vdpa/pds/vdpa_dev.c
+++ b/drivers/vdpa/pds/vdpa_dev.c
@@ -632,7 +632,8 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
}
pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
- dev, &pds_vdpa_ops, 1, 1, name, false);
+ dev, &pds_vdpa_ops, NULL,
+ 1, 1, name, false);
if (IS_ERR(pdsv)) {
dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
return PTR_ERR(pdsv);
@@ -643,7 +644,7 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
pdev = vdpa_aux->padev->vf_pdev;
dma_dev = &pdev->dev;
- pdsv->vdpa_dev.dma_dev = dma_dev;
+ pdsv->vdpa_dev.vmap.dma_dev = dma_dev;
status = pds_vdpa_get_status(&pdsv->vdpa_dev);
if (status == 0xff) {
diff --git a/drivers/vdpa/solidrun/snet_main.c b/drivers/vdpa/solidrun/snet_main.c
index 55ec51c17ab3..4588211d57eb 100644
--- a/drivers/vdpa/solidrun/snet_main.c
+++ b/drivers/vdpa/solidrun/snet_main.c
@@ -1008,8 +1008,8 @@ static int snet_vdpa_probe_vf(struct pci_dev *pdev)
}
/* Allocate vdpa device */
- snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL,
- false);
+ snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops,
+ NULL, 1, 1, NULL, false);
if (!snet) {
SNET_ERR(pdev, "Failed to allocate a vdpa device\n");
ret = -ENOMEM;
@@ -1052,8 +1052,8 @@ static int snet_vdpa_probe_vf(struct pci_dev *pdev)
*/
snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);
- /*set DMA device*/
- snet->vdpa.dma_dev = &pdev->dev;
+ /* set map metadata */
+ snet->vdpa.vmap.dma_dev = &pdev->dev;
/* Register VDPA device */
ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 8a372b51c21a..34874beb0152 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -142,6 +142,7 @@ static void vdpa_release_dev(struct device *d)
* initialized but before registered.
* @parent: the parent device
* @config: the bus operations that is supported by this device
+ * @map: the map operations supported by this device
* @ngroups: number of groups supported by this device
* @nas: number of address spaces supported by this device
* @size: size of the parent structure that contains private data
@@ -151,11 +152,12 @@ static void vdpa_release_dev(struct device *d)
* Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
*
- * Return: Returns an error when parent/config/dma_dev is not set or fail to get
+ * Return: Returns an error when parent/config/map is not set or if we fail to get
* ida.
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ const struct virtio_map_ops *map,
unsigned int ngroups, unsigned int nas,
size_t size, const char *name,
bool use_va)
@@ -187,6 +189,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->dev.release = vdpa_release_dev;
vdev->index = err;
vdev->config = config;
+ vdev->map = map;
vdev->features_valid = false;
vdev->use_va = use_va;
vdev->ngroups = ngroups;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index c204fc8e471a..c1c6431950e1 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -215,7 +215,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
else
ops = &vdpasim_config_ops;
- vdpa = __vdpa_alloc_device(NULL, ops,
+ vdpa = __vdpa_alloc_device(NULL, ops, NULL,
dev_attr->ngroups, dev_attr->nas,
dev_attr->alloc_size,
dev_attr->name, use_va);
@@ -272,7 +272,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
&vdpasim->iommu_lock);
- vdpasim->vdpa.dma_dev = dev;
+ vdpasim->vdpa.vmap.dma_dev = dev;
return vdpasim;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 58116f89d8da..4352b5cf74f0 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -103,19 +103,38 @@ void vduse_domain_clear_map(struct vduse_iova_domain *domain,
static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
u64 iova, u64 size, u64 paddr)
{
- struct vduse_bounce_map *map;
+ struct vduse_bounce_map *map, *head_map;
+ struct page *tmp_page;
u64 last = iova + size - 1;
while (iova <= last) {
- map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ /*
+ * When PAGE_SIZE is larger than 4KB, multiple adjacent bounce_maps will
+ * point to the same memory page of PAGE_SIZE. Since bounce_maps originate
+ * from IO requests, we may not be able to guarantee that the orig_phys
+ * values of all IO requests within the same 64KB memory page are contiguous.
+ * Therefore, we need to store them separately.
+ *
+ * Bounce pages are allocated on demand. As a result, it may occur that
+ * multiple bounce pages corresponding to the same 64KB memory page attempt
+ * to allocate memory simultaneously, so we use cmpxchg to handle this
+ * concurrency.
+ */
+ map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
if (!map->bounce_page) {
- map->bounce_page = alloc_page(GFP_ATOMIC);
- if (!map->bounce_page)
- return -ENOMEM;
+ head_map = &domain->bounce_maps[(iova & PAGE_MASK) >> BOUNCE_MAP_SHIFT];
+ if (!head_map->bounce_page) {
+ tmp_page = alloc_page(GFP_ATOMIC);
+ if (!tmp_page)
+ return -ENOMEM;
+ if (cmpxchg(&head_map->bounce_page, NULL, tmp_page))
+ __free_page(tmp_page);
+ }
+ map->bounce_page = head_map->bounce_page;
}
map->orig_phys = paddr;
- paddr += PAGE_SIZE;
- iova += PAGE_SIZE;
+ paddr += BOUNCE_MAP_SIZE;
+ iova += BOUNCE_MAP_SIZE;
}
return 0;
}
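
The cmpxchg() above is the entire concurrency story for on-demand bounce pages: every
4KB bounce map sharing one PAGE_SIZE page races to install a page into the head map,
exactly one caller wins, and the losers free their speculative allocation. The same
pattern in isolation (hypothetical helper, not part of the driver):

/* Sketch: once-only page install under concurrent GFP_ATOMIC callers. */
static struct page *bounce_head_page_get(struct vduse_bounce_map *head_map)
{
	struct page *tmp_page;

	if (!head_map->bounce_page) {
		tmp_page = alloc_page(GFP_ATOMIC);
		if (!tmp_page)
			return NULL;
		/* cmpxchg() returns the old value: non-NULL means we lost
		 * the race and must drop our speculative page. */
		if (cmpxchg(&head_map->bounce_page, NULL, tmp_page))
			__free_page(tmp_page);
	}
	return head_map->bounce_page;
}
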
@@ -127,12 +146,17 @@ static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
u64 last = iova + size - 1;
while (iova <= last) {
- map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
map->orig_phys = INVALID_PHYS_ADDR;
- iova += PAGE_SIZE;
+ iova += BOUNCE_MAP_SIZE;
}
}
+static unsigned int offset_in_bounce_page(dma_addr_t addr)
+{
+ return (addr & ~BOUNCE_MAP_MASK);
+}
+
static void do_bounce(phys_addr_t orig, void *addr, size_t size,
enum dma_data_direction dir)
{
@@ -163,7 +187,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
{
struct vduse_bounce_map *map;
struct page *page;
- unsigned int offset;
+ unsigned int offset, head_offset;
void *addr;
size_t sz;
@@ -171,9 +195,10 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
return;
while (size) {
- map = &domain->bounce_maps[iova >> PAGE_SHIFT];
- offset = offset_in_page(iova);
- sz = min_t(size_t, PAGE_SIZE - offset, size);
+ map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
+ head_offset = offset_in_page(iova);
+ offset = offset_in_bounce_page(iova);
+ sz = min_t(size_t, BOUNCE_MAP_SIZE - offset, size);
if (WARN_ON(!map->bounce_page ||
map->orig_phys == INVALID_PHYS_ADDR))
@@ -183,7 +208,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->user_bounce_page : map->bounce_page;
addr = kmap_local_page(page);
- do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
+ do_bounce(map->orig_phys + offset, addr + head_offset, sz, dir);
kunmap_local(addr);
size -= sz;
iova += sz;
@@ -218,7 +243,7 @@ vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
struct page *page = NULL;
read_lock(&domain->bounce_lock);
- map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
if (domain->user_bounce_pages || !map->bounce_page)
goto out;
@@ -236,7 +261,7 @@ vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns;
- bounce_pfns = domain->bounce_size >> PAGE_SHIFT;
+ bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
for (pfn = 0; pfn < bounce_pfns; pfn++) {
map = &domain->bounce_maps[pfn];
@@ -246,7 +271,8 @@ vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
if (!map->bounce_page)
continue;
- __free_page(map->bounce_page);
+ if (!((pfn << BOUNCE_MAP_SHIFT) & ~PAGE_MASK))
+ __free_page(map->bounce_page);
map->bounce_page = NULL;
}
}
@@ -254,8 +280,12 @@ vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
struct page **pages, int count)
{
- struct vduse_bounce_map *map;
- int i, ret;
+ struct vduse_bounce_map *map, *head_map;
+ int i, j, ret;
+ int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
+ int bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
+ struct page *head_page = NULL;
+ bool need_copy;
/* Now we don't support partial mapping */
if (count != (domain->bounce_size >> PAGE_SHIFT))
@@ -267,16 +297,23 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
goto out;
for (i = 0; i < count; i++) {
- map = &domain->bounce_maps[i];
- if (map->bounce_page) {
+ need_copy = false;
+ head_map = &domain->bounce_maps[(i * inner_pages)];
+ head_page = head_map->bounce_page;
+ for (j = 0; j < inner_pages; j++) {
+ if ((i * inner_pages + j) >= bounce_pfns)
+ break;
+ map = &domain->bounce_maps[(i * inner_pages + j)];
/* Copy kernel page to user page if it's in use */
- if (map->orig_phys != INVALID_PHYS_ADDR)
- memcpy_to_page(pages[i], 0,
- page_address(map->bounce_page),
- PAGE_SIZE);
+ if ((head_page) && (map->orig_phys != INVALID_PHYS_ADDR))
+ need_copy = true;
+ map->user_bounce_page = pages[i];
}
- map->user_bounce_page = pages[i];
get_page(pages[i]);
+ if ((head_page) && (need_copy))
+ memcpy_to_page(pages[i], 0,
+ page_address(head_page),
+ PAGE_SIZE);
}
domain->user_bounce_pages = true;
ret = 0;
@@ -288,8 +325,12 @@ out:
void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
{
- struct vduse_bounce_map *map;
- unsigned long i, count;
+ struct vduse_bounce_map *map, *head_map;
+ unsigned long i, j, count;
+ int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
+ int bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
+ struct page *head_page = NULL;
+ bool need_copy;
write_lock(&domain->bounce_lock);
if (!domain->user_bounce_pages)
@@ -297,20 +338,27 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
count = domain->bounce_size >> PAGE_SHIFT;
for (i = 0; i < count; i++) {
- struct page *page = NULL;
-
- map = &domain->bounce_maps[i];
- if (WARN_ON(!map->user_bounce_page))
+ need_copy = false;
+ head_map = &domain->bounce_maps[(i * inner_pages)];
+ if (WARN_ON(!head_map->user_bounce_page))
continue;
-
- /* Copy user page to kernel page if it's in use */
- if (map->orig_phys != INVALID_PHYS_ADDR) {
- page = map->bounce_page;
- memcpy_from_page(page_address(page),
- map->user_bounce_page, 0, PAGE_SIZE);
+ head_page = head_map->user_bounce_page;
+
+ for (j = 0; j < inner_pages; j++) {
+ if ((i * inner_pages + j) >= bounce_pfns)
+ break;
+ map = &domain->bounce_maps[(i * inner_pages + j)];
+ if (WARN_ON(!map->user_bounce_page))
+ continue;
+ /* Copy user page to kernel page if it's in use */
+ if ((map->orig_phys != INVALID_PHYS_ADDR) && (head_map->bounce_page))
+ need_copy = true;
+ map->user_bounce_page = NULL;
}
- put_page(map->user_bounce_page);
- map->user_bounce_page = NULL;
+ if (need_copy)
+ memcpy_from_page(page_address(head_map->bounce_page),
+ head_page, 0, PAGE_SIZE);
+ put_page(head_page);
}
domain->user_bounce_pages = false;
out:
@@ -447,7 +495,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
size_t size, dma_addr_t *dma_addr,
- gfp_t flag, unsigned long attrs)
+ gfp_t flag)
{
struct iova_domain *iovad = &domain->consistent_iovad;
unsigned long limit = domain->iova_limit;
@@ -581,7 +629,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
unsigned long pfn, bounce_pfns;
int ret;
- bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
+ bounce_pfns = PAGE_ALIGN(bounce_size) >> BOUNCE_MAP_SHIFT;
if (iova_limit <= bounce_size)
return NULL;
@@ -613,7 +661,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
rwlock_init(&domain->bounce_lock);
spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad,
- PAGE_SIZE, IOVA_START_PFN);
+ BOUNCE_MAP_SIZE, IOVA_START_PFN);
ret = iova_domain_init_rcaches(&domain->stream_iovad);
if (ret)
goto err_iovad_stream;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 7f3f0928ec78..775cad5238f3 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -19,6 +19,11 @@
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+#define BOUNCE_MAP_SHIFT 12
+#define BOUNCE_MAP_SIZE (1 << BOUNCE_MAP_SHIFT)
+#define BOUNCE_MAP_MASK (~(BOUNCE_MAP_SIZE - 1))
+#define BOUNCE_MAP_ALIGN(addr) (((addr) + BOUNCE_MAP_SIZE - 1) & ~(BOUNCE_MAP_SIZE - 1))
+
struct vduse_bounce_map {
struct page *bounce_page;
struct page *user_bounce_page;
@@ -64,7 +69,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
size_t size, dma_addr_t *dma_addr,
- gfp_t flag, unsigned long attrs);
+ gfp_t flag);
void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
void *vaddr, dma_addr_t dma_addr,
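
These macros pin the bounce granule at 4KB independent of PAGE_SIZE. A worked example
for a 64KB-page kernel (PAGE_SHIFT = 16, so 16 bounce maps share each page), written as
a hypothetical helper only to make the index math concrete:

static inline struct vduse_bounce_map *
bounce_map_of(struct vduse_iova_domain *domain, u64 iova)
{
	/* e.g. iova = 0x12345 on a 64KB-page kernel:
	 *   slot:   0x12345 >> BOUNCE_MAP_SHIFT              = 0x12
	 *   offset: 0x12345 & ~BOUNCE_MAP_MASK               = 0x345
	 *   head:   (0x12345 & PAGE_MASK) >> BOUNCE_MAP_SHIFT = 0x10
	 * Only the head slot (0x10, not 0x11..0x1f) owns the struct page,
	 * which is why the free path in iova_domain.c skips any pfn where
	 * (pfn << BOUNCE_MAP_SHIFT) & ~PAGE_MASK is non-zero.
	 */
	return &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
}
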
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 04620bb77203..e7bced0b5542 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -814,59 +814,53 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.free = vduse_vdpa_free,
};
-static void vduse_dev_sync_single_for_device(struct device *dev,
+static void vduse_dev_sync_single_for_device(union virtio_map token,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- struct vduse_dev *vdev = dev_to_vduse(dev);
- struct vduse_iova_domain *domain = vdev->domain;
+ struct vduse_iova_domain *domain = token.iova_domain;
vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
}
-static void vduse_dev_sync_single_for_cpu(struct device *dev,
+static void vduse_dev_sync_single_for_cpu(union virtio_map token,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- struct vduse_dev *vdev = dev_to_vduse(dev);
- struct vduse_iova_domain *domain = vdev->domain;
+ struct vduse_iova_domain *domain = token.iova_domain;
vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
}
-static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
+static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct vduse_dev *vdev = dev_to_vduse(dev);
- struct vduse_iova_domain *domain = vdev->domain;
+ struct vduse_iova_domain *domain = token.iova_domain;
return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
}
-static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
- struct vduse_dev *vdev = dev_to_vduse(dev);
- struct vduse_iova_domain *domain = vdev->domain;
+ struct vduse_iova_domain *domain = token.iova_domain;
return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
}
-static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs)
+static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag)
{
- struct vduse_dev *vdev = dev_to_vduse(dev);
- struct vduse_iova_domain *domain = vdev->domain;
+ struct vduse_iova_domain *domain = token.iova_domain;
unsigned long iova;
void *addr;
*dma_addr = DMA_MAPPING_ERROR;
addr = vduse_domain_alloc_coherent(domain, size,
- (dma_addr_t *)&iova, flag, attrs);
+ (dma_addr_t *)&iova, flag);
if (!addr)
return NULL;
@@ -875,31 +869,45 @@ static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
return addr;
}
-static void vduse_dev_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_addr,
- unsigned long attrs)
+static void vduse_dev_free_coherent(union virtio_map token, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
{
- struct vduse_dev *vdev = dev_to_vduse(dev);
- struct vduse_iova_domain *domain = vdev->domain;
+ struct vduse_iova_domain *domain = token.iova_domain;
vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
}
-static size_t vduse_dev_max_mapping_size(struct device *dev)
+static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
{
- struct vduse_dev *vdev = dev_to_vduse(dev);
- struct vduse_iova_domain *domain = vdev->domain;
+ struct vduse_iova_domain *domain = token.iova_domain;
+
+ return dma_addr < domain->bounce_size;
+}
+
+static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
+{
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ return -ENOMEM;
+ return 0;
+}
+
+static size_t vduse_dev_max_mapping_size(union virtio_map token)
+{
+ struct vduse_iova_domain *domain = token.iova_domain;
return domain->bounce_size;
}
-static const struct dma_map_ops vduse_dev_dma_ops = {
+static const struct virtio_map_ops vduse_map_ops = {
.sync_single_for_device = vduse_dev_sync_single_for_device,
.sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
.map_page = vduse_dev_map_page,
.unmap_page = vduse_dev_unmap_page,
.alloc = vduse_dev_alloc_coherent,
.free = vduse_dev_free_coherent,
+ .need_sync = vduse_dev_need_sync,
+ .mapping_error = vduse_dev_mapping_error,
.max_mapping_size = vduse_dev_max_mapping_size,
};
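
With set_dma_ops() gone, vduse_map_ops is an ordinary driver vtable hung off the vdpa
device rather than dev->dma_ops. A hedged sketch of how a caller can now dispatch (the
real dispatch lives in the virtio core, outside this diff; the vdev->map and vdev->vmap
names are taken from the vdpa.c hunk above):

/* Sketch: prefer the driver's map ops when present, else the DMA API. */
static dma_addr_t vdpa_map_page_sketch(struct vdpa_device *vdev,
				       struct page *page, size_t size,
				       enum dma_data_direction dir)
{
	if (vdev->map)
		return vdev->map->map_page(vdev->vmap, page, 0, size, dir, 0);

	return dma_map_page(vdev->vmap.dma_dev, page, 0, size, dir);
}
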
@@ -2003,26 +2011,18 @@ static struct vduse_mgmt_dev *vduse_mgmt;
static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
{
struct vduse_vdpa *vdev;
- int ret;
if (dev->vdev)
return -EEXIST;
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
- &vduse_vdpa_config_ops, 1, 1, name, true);
+ &vduse_vdpa_config_ops, &vduse_map_ops,
+ 1, 1, name, true);
if (IS_ERR(vdev))
return PTR_ERR(vdev);
dev->vdev = vdev;
vdev->dev = dev;
- vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
- ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
- if (ret) {
- put_device(&vdev->vdpa.dev);
- return ret;
- }
- set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
- vdev->vdpa.dma_dev = &vdev->vdpa.dev;
vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
return 0;
@@ -2055,6 +2055,7 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
return -ENOMEM;
}
+ dev->vdev->vdpa.vmap.iova_domain = dev->domain;
ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
if (ret) {
put_device(&dev->vdev->vdpa.dev);
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 8787407f75b0..17a19a728c9c 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -511,7 +511,8 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
int ret, i;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
- dev, &vp_vdpa_ops, 1, 1, name, false);
+ dev, &vp_vdpa_ops, NULL,
+ 1, 1, name, false);
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
@@ -520,7 +521,7 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;
- vp_vdpa->vdpa.dma_dev = &pdev->dev;
+ vp_vdpa->vdpa.vmap.dma_dev = &pdev->dev;
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
vp_vdpa->mdev = mdev;