Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c                |   1
-rw-r--r--  drivers/iommu/iommu-priv.h                      |  19
-rw-r--r--  drivers/iommu/iommu.c                           |  81
-rw-r--r--  drivers/iommu/iommufd/Kconfig                   |   4
-rw-r--r--  drivers/iommu/iommufd/device.c                  | 728
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c            | 112
-rw-r--r--  drivers/iommu/iommufd/io_pagetable.c            |  36
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h         |  62
-rw-r--r--  drivers/iommu/iommufd/iommufd_test.h            |  10
-rw-r--r--  drivers/iommu/iommufd/main.c                    |  58
-rw-r--r--  drivers/iommu/iommufd/selftest.c                | 197
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c                 |   1
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c               |   1
-rw-r--r--  drivers/vfio/Kconfig                            |  27
-rw-r--r--  drivers/vfio/Makefile                           |   3
-rw-r--r--  drivers/vfio/device_cdev.c                      | 228
-rw-r--r--  drivers/vfio/fsl-mc/vfio_fsl_mc.c               |   1
-rw-r--r--  drivers/vfio/group.c                            | 173
-rw-r--r--  drivers/vfio/iommufd.c                          | 145
-rw-r--r--  drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c  |   2
-rw-r--r--  drivers/vfio/pci/mlx5/main.c                    |   1
-rw-r--r--  drivers/vfio/pci/vfio_pci.c                     |   1
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c                | 250
-rw-r--r--  drivers/vfio/platform/vfio_amba.c               |   1
-rw-r--r--  drivers/vfio/platform/vfio_platform.c           |   1
-rw-r--r--  drivers/vfio/vfio.h                             | 218
-rw-r--r--  drivers/vfio/vfio_main.c                        | 258
27 files changed, 2101 insertions, 518 deletions
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index de675d799c7d..9cd9e9da60dd 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1474,6 +1474,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
static int intel_vgpu_probe(struct mdev_device *mdev)
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
new file mode 100644
index 000000000000..e3e3b2015854
--- /dev/null
+++ b/drivers/iommu/iommu-priv.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+ */
+#ifndef __LINUX_IOMMU_PRIV_H
+#define __LINUX_IOMMU_PRIV_H
+
+#include <linux/iommu.h>
+
+int iommu_group_replace_domain(struct iommu_group *group,
+ struct iommu_domain *new_domain);
+
+int iommu_device_register_bus(struct iommu_device *iommu,
+ const struct iommu_ops *ops, struct bus_type *bus,
+ struct notifier_block *nb);
+void iommu_device_unregister_bus(struct iommu_device *iommu,
+ struct bus_type *bus,
+ struct notifier_block *nb);
+
+#endif /* __LINUX_IOMMU_PRIV_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index caaf563d38ae..c9c370de9d3d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -34,8 +34,10 @@
#include <linux/msi.h>
#include "dma-iommu.h"
+#include "iommu-priv.h"
#include "iommu-sva.h"
+#include "iommu-priv.h"
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -287,6 +289,48 @@ void iommu_device_unregister(struct iommu_device *iommu)
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
+#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
+void iommu_device_unregister_bus(struct iommu_device *iommu,
+ struct bus_type *bus,
+ struct notifier_block *nb)
+{
+ bus_unregister_notifier(bus, nb);
+ iommu_device_unregister(iommu);
+}
+EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
+
+/*
+ * Register an iommu driver against a single bus. This is only used by iommufd
+ * selftest to create a mock iommu driver. The caller must provide
+ * some memory to hold a notifier_block.
+ */
+int iommu_device_register_bus(struct iommu_device *iommu,
+ const struct iommu_ops *ops, struct bus_type *bus,
+ struct notifier_block *nb)
+{
+ int err;
+
+ iommu->ops = ops;
+ nb->notifier_call = iommu_bus_notifier;
+ err = bus_register_notifier(bus, nb);
+ if (err)
+ return err;
+
+ spin_lock(&iommu_device_lock);
+ list_add_tail(&iommu->list, &iommu_device_list);
+ spin_unlock(&iommu_device_lock);
+
+ bus->iommu_ops = ops;
+ err = bus_iommu_probe(bus);
+ if (err) {
+ iommu_device_unregister_bus(iommu, bus, nb);
+ return err;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_device_register_bus);
+#endif
+
static struct dev_iommu *dev_iommu_get(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
@@ -2114,6 +2158,32 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
+/**
+ * iommu_group_replace_domain - replace the domain that a group is attached to
+ * @new_domain: new IOMMU domain to replace with
+ * @group: IOMMU group that will be attached to the new domain
+ *
+ * This API allows the group to switch domains without being forced to go to
+ * the blocking domain in-between.
+ *
+ * If the currently attached domain is a core domain (e.g. a default_domain),
+ * it will act just like the iommu_attach_group().
+ */
+int iommu_group_replace_domain(struct iommu_group *group,
+ struct iommu_domain *new_domain)
+{
+ int ret;
+
+ if (!new_domain)
+ return -EINVAL;
+
+ mutex_lock(&group->mutex);
+ ret = __iommu_group_set_domain(group, new_domain);
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL);
+
static int __iommu_device_set_domain(struct iommu_group *group,
struct device *dev,
struct iommu_domain *new_domain,
@@ -2642,6 +2712,14 @@ int iommu_set_pgtable_quirks(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
+/**
+ * iommu_get_resv_regions - get reserved regions
+ * @dev: device for which to get reserved regions
+ * @list: reserved region list for device
+ *
+ * This returns a list of reserved IOVA regions specific to this device.
+ * A domain user should not map IOVA in these ranges.
+ */
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev_iommu_ops(dev);
@@ -2649,9 +2727,10 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
if (ops->get_resv_regions)
ops->get_resv_regions(dev, list);
}
+EXPORT_SYMBOL_GPL(iommu_get_resv_regions);
/**
- * iommu_put_resv_regions - release resered regions
+ * iommu_put_resv_regions - release reserved regions
* @dev: device for which to free reserved regions
* @list: reserved region list for device
*
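
A hedged sketch (not shown in the hunks above) of how the selftest init path might consume the two bus helpers added here; the function name mock_iommu_init is illustrative only, while mock_iommu_device, mock_ops, and iommufd_mock_bus_type are the selftest objects touched later in this diff:

static int __init mock_iommu_init(void)
{
	int rc;

	/* The selftest owns a private bus that carries the mock devices */
	rc = bus_register(&iommufd_mock_bus_type.bus);
	if (rc)
		return rc;

	/* Caller supplies the notifier_block storage, per the comment above */
	rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
				       &iommufd_mock_bus_type.bus,
				       &iommufd_mock_bus_type.nb);
	if (rc)
		bus_unregister(&iommufd_mock_bus_type.bus);
	return rc;
}

Teardown would pair iommu_device_unregister_bus() with bus_unregister() in the reverse order.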
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index ada693ea51a7..99d4b075df49 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -14,8 +14,8 @@ config IOMMUFD
if IOMMUFD
config IOMMUFD_VFIO_CONTAINER
bool "IOMMUFD provides the VFIO container /dev/vfio/vfio"
- depends on VFIO && !VFIO_CONTAINER
- default VFIO && !VFIO_CONTAINER
+ depends on VFIO_GROUP && !VFIO_CONTAINER
+ default VFIO_GROUP && !VFIO_CONTAINER
help
IOMMUFD will provide /dev/vfio/vfio instead of VFIO. This relies on
IOMMUFD providing compatibility emulation to give the same ioctls.
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index ed2937a4e196..90f88c295ce0 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -4,6 +4,7 @@
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
@@ -15,13 +16,127 @@ MODULE_PARM_DESC(
"Allow IOMMUFD to bind to devices even if the platform cannot isolate "
"the MSI interrupt window. Enabling this is a security weakness.");
+static void iommufd_group_release(struct kref *kref)
+{
+ struct iommufd_group *igroup =
+ container_of(kref, struct iommufd_group, ref);
+
+ WARN_ON(igroup->hwpt || !list_empty(&igroup->device_list));
+
+ xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
+ NULL, GFP_KERNEL);
+ iommu_group_put(igroup->group);
+ mutex_destroy(&igroup->lock);
+ kfree(igroup);
+}
+
+static void iommufd_put_group(struct iommufd_group *group)
+{
+ kref_put(&group->ref, iommufd_group_release);
+}
+
+static bool iommufd_group_try_get(struct iommufd_group *igroup,
+ struct iommu_group *group)
+{
+ if (!igroup)
+ return false;
+ /*
+ * group ID's cannot be re-used until the group is put back which does
+ * not happen if we could get an igroup pointer under the xa_lock.
+ */
+ if (WARN_ON(igroup->group != group))
+ return false;
+ return kref_get_unless_zero(&igroup->ref);
+}
+
+/*
+ * iommufd needs to store some more data for each iommu_group, we keep a
+ * parallel xarray indexed by iommu_group id to hold this instead of putting it
+ * in the core structure. To keep things simple the iommufd_group memory is
+ * unique within the iommufd_ctx. This makes it easy to check there are no
+ * memory leaks.
+ */
+static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
+ struct device *dev)
+{
+ struct iommufd_group *new_igroup;
+ struct iommufd_group *cur_igroup;
+ struct iommufd_group *igroup;
+ struct iommu_group *group;
+ unsigned int id;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ id = iommu_group_id(group);
+
+ xa_lock(&ictx->groups);
+ igroup = xa_load(&ictx->groups, id);
+ if (iommufd_group_try_get(igroup, group)) {
+ xa_unlock(&ictx->groups);
+ iommu_group_put(group);
+ return igroup;
+ }
+ xa_unlock(&ictx->groups);
+
+ new_igroup = kzalloc(sizeof(*new_igroup), GFP_KERNEL);
+ if (!new_igroup) {
+ iommu_group_put(group);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kref_init(&new_igroup->ref);
+ mutex_init(&new_igroup->lock);
+ INIT_LIST_HEAD(&new_igroup->device_list);
+ new_igroup->sw_msi_start = PHYS_ADDR_MAX;
+ /* group reference moves into new_igroup */
+ new_igroup->group = group;
+
+ /*
+	 * The ictx is not additionally refcounted here because all objects using
+ * an igroup must put it before their destroy completes.
+ */
+ new_igroup->ictx = ictx;
+
+ /*
+ * We dropped the lock so igroup is invalid. NULL is a safe and likely
+ * value to assume for the xa_cmpxchg algorithm.
+ */
+ cur_igroup = NULL;
+ xa_lock(&ictx->groups);
+ while (true) {
+ igroup = __xa_cmpxchg(&ictx->groups, id, cur_igroup, new_igroup,
+ GFP_KERNEL);
+ if (xa_is_err(igroup)) {
+ xa_unlock(&ictx->groups);
+ iommufd_put_group(new_igroup);
+ return ERR_PTR(xa_err(igroup));
+ }
+
+ /* new_group was successfully installed */
+ if (cur_igroup == igroup) {
+ xa_unlock(&ictx->groups);
+ return new_igroup;
+ }
+
+ /* Check again if the current group is any good */
+ if (iommufd_group_try_get(igroup, group)) {
+ xa_unlock(&ictx->groups);
+ iommufd_put_group(new_igroup);
+ return igroup;
+ }
+ cur_igroup = igroup;
+ }
+}
+
void iommufd_device_destroy(struct iommufd_object *obj)
{
struct iommufd_device *idev =
container_of(obj, struct iommufd_device, obj);
iommu_device_release_dma_owner(idev->dev);
- iommu_group_put(idev->group);
+ iommufd_put_group(idev->igroup);
if (!iommufd_selftest_is_mock_dev(idev->dev))
iommufd_ctx_put(idev->ictx);
}
@@ -46,7 +161,7 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id)
{
struct iommufd_device *idev;
- struct iommu_group *group;
+ struct iommufd_group *igroup;
int rc;
/*
@@ -56,9 +171,29 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
return ERR_PTR(-EINVAL);
- group = iommu_group_get(dev);
- if (!group)
- return ERR_PTR(-ENODEV);
+ igroup = iommufd_get_group(ictx, dev);
+ if (IS_ERR(igroup))
+ return ERR_CAST(igroup);
+
+ /*
+ * For historical compat with VFIO the insecure interrupt path is
+ * allowed if the module parameter is set. Secure/Isolated means that a
+ * MemWr operation from the device (eg a simple DMA) cannot trigger an
+ * interrupt outside this iommufd context.
+ */
+ if (!iommufd_selftest_is_mock_dev(dev) &&
+ !iommu_group_has_isolated_msi(igroup->group)) {
+ if (!allow_unsafe_interrupts) {
+ rc = -EPERM;
+ goto out_group_put;
+ }
+
+ dev_warn(
+ dev,
+ "MSI interrupts are not secure, they cannot be isolated by the platform. "
+ "Check that platform features like interrupt remapping are enabled. "
+ "Use the \"allow_unsafe_interrupts\" module parameter to override\n");
+ }
rc = iommu_device_claim_dma_owner(dev, ictx);
if (rc)
@@ -77,8 +212,8 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
/* The calling driver is a user until iommufd_device_unbind() */
refcount_inc(&idev->obj.users);
- /* group refcount moves into iommufd_device */
- idev->group = group;
+ /* igroup refcount moves into iommufd_device */
+ idev->igroup = igroup;
/*
* If the caller fails after this success it must call
@@ -93,12 +228,43 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
out_release_owner:
iommu_device_release_dma_owner(dev);
out_group_put:
- iommu_group_put(group);
+ iommufd_put_group(igroup);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);
/**
+ * iommufd_ctx_has_group - True if any device within the group is bound
+ * to the ictx
+ * @ictx: iommufd file descriptor
+ * @group: Pointer to a physical iommu_group struct
+ *
+ * True if any device within the group has been bound to this ictx, ex. via
+ * iommufd_device_bind(), therefore implying ictx ownership of the group.
+ */
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group)
+{
+ struct iommufd_object *obj;
+ unsigned long index;
+
+ if (!ictx || !group)
+ return false;
+
+ xa_lock(&ictx->objects);
+ xa_for_each(&ictx->objects, index, obj) {
+ if (obj->type == IOMMUFD_OBJ_DEVICE &&
+ container_of(obj, struct iommufd_device, obj)
+ ->igroup->group == group) {
+ xa_unlock(&ictx->objects);
+ return true;
+ }
+ }
+ xa_unlock(&ictx->objects);
+ return false;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_has_group, IOMMUFD);
+
+/**
* iommufd_device_unbind - Undo iommufd_device_bind()
* @idev: Device returned by iommufd_device_bind()
*
@@ -113,10 +279,22 @@ void iommufd_device_unbind(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);
-static int iommufd_device_setup_msi(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- phys_addr_t sw_msi_start)
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev)
+{
+ return idev->ictx;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_to_ictx, IOMMUFD);
+
+u32 iommufd_device_to_id(struct iommufd_device *idev)
+{
+ return idev->obj.id;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, IOMMUFD);
+
+static int iommufd_group_setup_msi(struct iommufd_group *igroup,
+ struct iommufd_hw_pagetable *hwpt)
{
+ phys_addr_t sw_msi_start = igroup->sw_msi_start;
int rc;
/*
@@ -143,128 +321,192 @@ static int iommufd_device_setup_msi(struct iommufd_device *idev,
*/
hwpt->msi_cookie = true;
}
-
- /*
- * For historical compat with VFIO the insecure interrupt path is
- * allowed if the module parameter is set. Insecure means that a MemWr
- * operation from the device (eg a simple DMA) cannot trigger an
- * interrupt outside this iommufd context.
- */
- if (!iommufd_selftest_is_mock_dev(idev->dev) &&
- !iommu_group_has_isolated_msi(idev->group)) {
- if (!allow_unsafe_interrupts)
- return -EPERM;
-
- dev_warn(
- idev->dev,
- "MSI interrupts are not secure, they cannot be isolated by the platform. "
- "Check that platform features like interrupt remapping are enabled. "
- "Use the \"allow_unsafe_interrupts\" module parameter to override\n");
- }
return 0;
}
-static bool iommufd_hw_pagetable_has_group(struct iommufd_hw_pagetable *hwpt,
- struct iommu_group *group)
-{
- struct iommufd_device *cur_dev;
-
- lockdep_assert_held(&hwpt->devices_lock);
-
- list_for_each_entry(cur_dev, &hwpt->devices, devices_item)
- if (cur_dev->group == group)
- return true;
- return false;
-}
-
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
- phys_addr_t sw_msi_start = PHYS_ADDR_MAX;
int rc;
- lockdep_assert_held(&hwpt->devices_lock);
+ mutex_lock(&idev->igroup->lock);
- if (WARN_ON(idev->hwpt))
- return -EINVAL;
-
- /*
- * Try to upgrade the domain we have, it is an iommu driver bug to
- * report IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail
- * enforce_cache_coherency when there are no devices attached to the
- * domain.
- */
- if (idev->enforce_cache_coherency && !hwpt->enforce_cache_coherency) {
- if (hwpt->domain->ops->enforce_cache_coherency)
- hwpt->enforce_cache_coherency =
- hwpt->domain->ops->enforce_cache_coherency(
- hwpt->domain);
- if (!hwpt->enforce_cache_coherency) {
- WARN_ON(list_empty(&hwpt->devices));
- return -EINVAL;
- }
+ if (idev->igroup->hwpt != NULL && idev->igroup->hwpt != hwpt) {
+ rc = -EINVAL;
+ goto err_unlock;
}
- rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev,
- idev->group, &sw_msi_start);
- if (rc)
- return rc;
+ /* Try to upgrade the domain we have */
+ if (idev->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (rc)
+ goto err_unlock;
+ }
- rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start);
+ rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev,
+ &idev->igroup->sw_msi_start);
if (rc)
- goto err_unresv;
+ goto err_unlock;
/*
- * FIXME: Hack around missing a device-centric iommu api, only attach to
- * the group once for the first device that is in the group.
+ * Only attach to the group once for the first device that is in the
+ * group. All the other devices will follow this attachment. The user
+ * should attach every device individually to the hwpt as the per-device
+ * reserved regions are only updated during individual device
+ * attachment.
*/
- if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
- rc = iommu_attach_group(hwpt->domain, idev->group);
+ if (list_empty(&idev->igroup->device_list)) {
+ rc = iommufd_group_setup_msi(idev->igroup, hwpt);
if (rc)
goto err_unresv;
+
+ rc = iommu_attach_group(hwpt->domain, idev->igroup->group);
+ if (rc)
+ goto err_unresv;
+ idev->igroup->hwpt = hwpt;
}
+ refcount_inc(&hwpt->obj.users);
+ list_add_tail(&idev->group_item, &idev->igroup->device_list);
+ mutex_unlock(&idev->igroup->lock);
return 0;
err_unresv:
iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+err_unlock:
+ mutex_unlock(&idev->igroup->lock);
return rc;
}
-void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+struct iommufd_hw_pagetable *
+iommufd_hw_pagetable_detach(struct iommufd_device *idev)
{
- if (!iommufd_hw_pagetable_has_group(hwpt, idev->group))
- iommu_detach_group(hwpt->domain, idev->group);
+ struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt;
+
+ mutex_lock(&idev->igroup->lock);
+ list_del(&idev->group_item);
+ if (list_empty(&idev->igroup->device_list)) {
+ iommu_detach_group(hwpt->domain, idev->igroup->group);
+ idev->igroup->hwpt = NULL;
+ }
iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+ mutex_unlock(&idev->igroup->lock);
+
+ /* Caller must destroy hwpt */
+ return hwpt;
}
-static int iommufd_device_do_attach(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt)
+static struct iommufd_hw_pagetable *
+iommufd_device_do_attach(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt)
{
int rc;
- mutex_lock(&hwpt->devices_lock);
rc = iommufd_hw_pagetable_attach(hwpt, idev);
if (rc)
- goto out_unlock;
+ return ERR_PTR(rc);
+ return NULL;
+}
- idev->hwpt = hwpt;
- refcount_inc(&hwpt->obj.users);
- list_add(&idev->devices_item, &hwpt->devices);
-out_unlock:
- mutex_unlock(&hwpt->devices_lock);
- return rc;
+static struct iommufd_hw_pagetable *
+iommufd_device_do_replace(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt)
+{
+ struct iommufd_group *igroup = idev->igroup;
+ struct iommufd_hw_pagetable *old_hwpt;
+ unsigned int num_devices = 0;
+ struct iommufd_device *cur;
+ int rc;
+
+ mutex_lock(&idev->igroup->lock);
+
+ if (igroup->hwpt == NULL) {
+ rc = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (hwpt == igroup->hwpt) {
+ mutex_unlock(&idev->igroup->lock);
+ return NULL;
+ }
+
+ /* Try to upgrade the domain we have */
+ list_for_each_entry(cur, &igroup->device_list, group_item) {
+ num_devices++;
+ if (cur->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (rc)
+ goto err_unlock;
+ }
+ }
+
+ old_hwpt = igroup->hwpt;
+ if (hwpt->ioas != old_hwpt->ioas) {
+ list_for_each_entry(cur, &igroup->device_list, group_item) {
+ rc = iopt_table_enforce_dev_resv_regions(
+ &hwpt->ioas->iopt, cur->dev, NULL);
+ if (rc)
+ goto err_unresv;
+ }
+ }
+
+ rc = iommufd_group_setup_msi(idev->igroup, hwpt);
+ if (rc)
+ goto err_unresv;
+
+ rc = iommu_group_replace_domain(igroup->group, hwpt->domain);
+ if (rc)
+ goto err_unresv;
+
+ if (hwpt->ioas != old_hwpt->ioas) {
+ list_for_each_entry(cur, &igroup->device_list, group_item)
+ iopt_remove_reserved_iova(&old_hwpt->ioas->iopt,
+ cur->dev);
+ }
+
+ igroup->hwpt = hwpt;
+
+ /*
+ * Move the refcounts held by the device_list to the new hwpt. Retain a
+ * refcount for this thread as the caller will free it.
+ */
+ refcount_add(num_devices, &hwpt->obj.users);
+ if (num_devices > 1)
+ WARN_ON(refcount_sub_and_test(num_devices - 1,
+ &old_hwpt->obj.users));
+ mutex_unlock(&idev->igroup->lock);
+
+ /* Caller must destroy old_hwpt */
+ return old_hwpt;
+err_unresv:
+ list_for_each_entry(cur, &igroup->device_list, group_item)
+ iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev);
+err_unlock:
+ mutex_unlock(&idev->igroup->lock);
+ return ERR_PTR(rc);
}
+typedef struct iommufd_hw_pagetable *(*attach_fn)(
+ struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt);
+
/*
* When automatically managing the domains we search for a compatible domain in
* the iopt and if one is found use it, otherwise create a new domain.
* Automatic domain selection will never pick a manually created domain.
*/
-static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
- struct iommufd_ioas *ioas)
+static struct iommufd_hw_pagetable *
+iommufd_device_auto_get_domain(struct iommufd_device *idev,
+ struct iommufd_ioas *ioas, u32 *pt_id,
+ attach_fn do_attach)
{
+ /*
+ * iommufd_hw_pagetable_attach() is called by
+ * iommufd_hw_pagetable_alloc() in immediate attachment mode, same as
+ * iommufd_device_do_attach(). So if we are in this mode then we prefer
+ * to use the immediate_attach path as it supports drivers that can't
+ * directly allocate a domain.
+ */
+ bool immediate_attach = do_attach == iommufd_device_do_attach;
+ struct iommufd_hw_pagetable *destroy_hwpt;
struct iommufd_hw_pagetable *hwpt;
- int rc;
/*
* There is no differentiation when domains are allocated, so any domain
@@ -278,50 +520,58 @@ static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
if (!iommufd_lock_obj(&hwpt->obj))
continue;
- rc = iommufd_device_do_attach(idev, hwpt);
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt)) {
+ iommufd_put_object(&hwpt->obj);
+ /*
+ * -EINVAL means the domain is incompatible with the
+ * device. Other error codes should propagate to
+ * userspace as failure. Success means the domain is
+ * attached.
+ */
+ if (PTR_ERR(destroy_hwpt) == -EINVAL)
+ continue;
+ goto out_unlock;
+ }
+ *pt_id = hwpt->obj.id;
iommufd_put_object(&hwpt->obj);
-
- /*
- * -EINVAL means the domain is incompatible with the device.
- * Other error codes should propagate to userspace as failure.
- * Success means the domain is attached.
- */
- if (rc == -EINVAL)
- continue;
goto out_unlock;
}
- hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true);
+ hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev,
+ immediate_attach);
if (IS_ERR(hwpt)) {
- rc = PTR_ERR(hwpt);
+ destroy_hwpt = ERR_CAST(hwpt);
goto out_unlock;
}
+
+ if (!immediate_attach) {
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt))
+ goto out_abort;
+ } else {
+ destroy_hwpt = NULL;
+ }
+
hwpt->auto_domain = true;
+ *pt_id = hwpt->obj.id;
- mutex_unlock(&ioas->mutex);
iommufd_object_finalize(idev->ictx, &hwpt->obj);
- return 0;
+ mutex_unlock(&ioas->mutex);
+ return destroy_hwpt;
+
+out_abort:
+ iommufd_object_abort_and_destroy(idev->ictx, &hwpt->obj);
out_unlock:
mutex_unlock(&ioas->mutex);
- return rc;
+ return destroy_hwpt;
}
-/**
- * iommufd_device_attach - Connect a device from an iommu_domain
- * @idev: device to attach
- * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
- * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
- *
- * This connects the device to an iommu_domain, either automatically or manually
- * selected. Once this completes the device could do DMA.
- *
- * The caller should return the resulting pt_id back to userspace.
- * This function is undone by calling iommufd_device_detach().
- */
-int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
+ attach_fn do_attach)
{
+ struct iommufd_hw_pagetable *destroy_hwpt;
struct iommufd_object *pt_obj;
- int rc;
pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY);
if (IS_ERR(pt_obj))
@@ -332,8 +582,8 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
struct iommufd_hw_pagetable *hwpt =
container_of(pt_obj, struct iommufd_hw_pagetable, obj);
- rc = iommufd_device_do_attach(idev, hwpt);
- if (rc)
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
}
@@ -341,27 +591,80 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
struct iommufd_ioas *ioas =
container_of(pt_obj, struct iommufd_ioas, obj);
- rc = iommufd_device_auto_get_domain(idev, ioas);
- if (rc)
+ destroy_hwpt = iommufd_device_auto_get_domain(idev, ioas, pt_id,
+ do_attach);
+ if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
}
default:
- rc = -EINVAL;
+ destroy_hwpt = ERR_PTR(-EINVAL);
goto out_put_pt_obj;
}
+ iommufd_put_object(pt_obj);
- refcount_inc(&idev->obj.users);
- *pt_id = idev->hwpt->obj.id;
- rc = 0;
+ /* This destruction has to be after we unlock everything */
+ if (destroy_hwpt)
+ iommufd_hw_pagetable_put(idev->ictx, destroy_hwpt);
+ return 0;
out_put_pt_obj:
iommufd_put_object(pt_obj);
- return rc;
+ return PTR_ERR(destroy_hwpt);
+}
+
+/**
+ * iommufd_device_attach - Connect a device to an iommu_domain
+ * @idev: device to attach
+ * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
+ * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
+ *
+ * This connects the device to an iommu_domain, either automatically or manually
+ * selected. Once this completes the device could do DMA.
+ *
+ * The caller should return the resulting pt_id back to userspace.
+ * This function is undone by calling iommufd_device_detach().
+ */
+int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+{
+ int rc;
+
+ rc = iommufd_device_change_pt(idev, pt_id, &iommufd_device_do_attach);
+ if (rc)
+ return rc;
+
+ /*
+ * Pairs with iommufd_device_detach() - catches caller bugs attempting
+ * to destroy a device with an attachment.
+ */
+ refcount_inc(&idev->obj.users);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
/**
+ * iommufd_device_replace - Change the device's iommu_domain
+ * @idev: device to change
+ * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
+ * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
+ *
+ * This is the same as::
+ *
+ * iommufd_device_detach();
+ * iommufd_device_attach();
+ *
+ * If it fails then no change is made to the attachment. The iommu driver may
+ * implement this so there is no disruption in translation. This can only be
+ * called if iommufd_device_attach() has already succeeded.
+ */
+int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id)
+{
+ return iommufd_device_change_pt(idev, pt_id,
+ &iommufd_device_do_replace);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_replace, IOMMUFD);
+
+/**
 * iommufd_device_detach - Disconnect a device from an iommu_domain
* @idev: device to detach
*
@@ -370,33 +673,87 @@ EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
*/
void iommufd_device_detach(struct iommufd_device *idev)
{
- struct iommufd_hw_pagetable *hwpt = idev->hwpt;
-
- mutex_lock(&hwpt->devices_lock);
- list_del(&idev->devices_item);
- idev->hwpt = NULL;
- iommufd_hw_pagetable_detach(hwpt, idev);
- mutex_unlock(&hwpt->devices_lock);
-
- if (hwpt->auto_domain)
- iommufd_object_deref_user(idev->ictx, &hwpt->obj);
- else
- refcount_dec(&hwpt->obj.users);
+ struct iommufd_hw_pagetable *hwpt;
+ hwpt = iommufd_hw_pagetable_detach(idev);
+ iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
+/*
+ * On success, it will refcount_inc() at a valid new_ioas and refcount_dec() at
+ * a valid cur_ioas (access->ioas). A caller passing in a valid new_ioas should
+ * call iommufd_put_object() if it does an iommufd_get_object() for a new_ioas.
+ */
+static int iommufd_access_change_ioas(struct iommufd_access *access,
+ struct iommufd_ioas *new_ioas)
+{
+ u32 iopt_access_list_id = access->iopt_access_list_id;
+ struct iommufd_ioas *cur_ioas = access->ioas;
+ int rc;
+
+ lockdep_assert_held(&access->ioas_lock);
+
+ /* We are racing with a concurrent detach, bail */
+ if (cur_ioas != access->ioas_unpin)
+ return -EBUSY;
+
+ if (cur_ioas == new_ioas)
+ return 0;
+
+ /*
+ * Set ioas to NULL to block any further iommufd_access_pin_pages().
+ * iommufd_access_unpin_pages() can continue using access->ioas_unpin.
+ */
+ access->ioas = NULL;
+
+ if (new_ioas) {
+ rc = iopt_add_access(&new_ioas->iopt, access);
+ if (rc) {
+ access->ioas = cur_ioas;
+ return rc;
+ }
+ refcount_inc(&new_ioas->obj.users);
+ }
+
+ if (cur_ioas) {
+ if (access->ops->unmap) {
+ mutex_unlock(&access->ioas_lock);
+ access->ops->unmap(access->data, 0, ULONG_MAX);
+ mutex_lock(&access->ioas_lock);
+ }
+ iopt_remove_access(&cur_ioas->iopt, access, iopt_access_list_id);
+ refcount_dec(&cur_ioas->obj.users);
+ }
+
+ access->ioas = new_ioas;
+ access->ioas_unpin = new_ioas;
+
+ return 0;
+}
+
+static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
+{
+ struct iommufd_ioas *ioas = iommufd_get_ioas(access->ictx, id);
+ int rc;
+
+ if (IS_ERR(ioas))
+ return PTR_ERR(ioas);
+ rc = iommufd_access_change_ioas(access, ioas);
+ iommufd_put_object(&ioas->obj);
+ return rc;
+}
+
void iommufd_access_destroy_object(struct iommufd_object *obj)
{
struct iommufd_access *access =
container_of(obj, struct iommufd_access, obj);
- if (access->ioas) {
- iopt_remove_access(&access->ioas->iopt, access);
- refcount_dec(&access->ioas->obj.users);
- access->ioas = NULL;
- }
+ mutex_lock(&access->ioas_lock);
+ if (access->ioas)
+ WARN_ON(iommufd_access_change_ioas(access, NULL));
+ mutex_unlock(&access->ioas_lock);
iommufd_ctx_put(access->ictx);
}
@@ -441,6 +798,7 @@ iommufd_access_create(struct iommufd_ctx *ictx,
iommufd_ctx_get(ictx);
iommufd_object_finalize(ictx, &access->obj);
*id = access->obj.id;
+ mutex_init(&access->ioas_lock);
return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
@@ -457,30 +815,49 @@ void iommufd_access_destroy(struct iommufd_access *access)
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
+void iommufd_access_detach(struct iommufd_access *access)
+{
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(!access->ioas)) {
+ mutex_unlock(&access->ioas_lock);
+ return;
+ }
+ WARN_ON(iommufd_access_change_ioas(access, NULL));
+ mutex_unlock(&access->ioas_lock);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_detach, IOMMUFD);
+
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{
- struct iommufd_ioas *new_ioas;
- int rc = 0;
+ int rc;
- if (access->ioas)
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(access->ioas)) {
+ mutex_unlock(&access->ioas_lock);
return -EINVAL;
-
- new_ioas = iommufd_get_ioas(access->ictx, ioas_id);
- if (IS_ERR(new_ioas))
- return PTR_ERR(new_ioas);
-
- rc = iopt_add_access(&new_ioas->iopt, access);
- if (rc) {
- iommufd_put_object(&new_ioas->obj);
- return rc;
}
- iommufd_ref_to_users(&new_ioas->obj);
- access->ioas = new_ioas;
- return 0;
+ rc = iommufd_access_change_ioas_id(access, ioas_id);
+ mutex_unlock(&access->ioas_lock);
+ return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id)
+{
+ int rc;
+
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ rc = iommufd_access_change_ioas_id(access, ioas_id);
+ mutex_unlock(&access->ioas_lock);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_replace, IOMMUFD);
+
/**
* iommufd_access_notify_unmap - Notify users of an iopt to stop using it
* @iopt: iopt to work on
@@ -531,8 +908,8 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
void iommufd_access_unpin_pages(struct iommufd_access *access,
unsigned long iova, unsigned long length)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
unsigned long last_iova;
struct iopt_area *area;
@@ -540,6 +917,17 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
return;
+ mutex_lock(&access->ioas_lock);
+ /*
+ * The driver must be doing something wrong if it calls this before an
+ * iommufd_access_attach() or after an iommufd_access_detach().
+ */
+ if (WARN_ON(!access->ioas_unpin)) {
+ mutex_unlock(&access->ioas_lock);
+ return;
+ }
+ iopt = &access->ioas_unpin->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
iopt_area_remove_access(
@@ -549,6 +937,7 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
min(last_iova, iopt_area_last_iova(area))));
WARN_ON(!iopt_area_contig_done(&iter));
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
@@ -594,8 +983,8 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
unsigned long length, struct page **out_pages,
unsigned int flags)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
unsigned long last_iova;
struct iopt_area *area;
int rc;
@@ -610,6 +999,13 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
if (check_add_overflow(iova, length - 1, &last_iova))
return -EOVERFLOW;
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ iopt = &access->ioas->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
unsigned long last = min(last_iova, iopt_area_last_iova(area));
@@ -640,6 +1036,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
}
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return 0;
err_remove:
@@ -654,6 +1051,7 @@ err_remove:
iopt_area_last_iova(area))));
}
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
@@ -673,8 +1071,8 @@ EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
void *data, size_t length, unsigned int flags)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
struct iopt_area *area;
unsigned long last_iova;
int rc;
@@ -684,6 +1082,13 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
if (check_add_overflow(iova, length - 1, &last_iova))
return -EOVERFLOW;
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ iopt = &access->ioas->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
unsigned long last = min(last_iova, iopt_area_last_iova(area));
@@ -710,6 +1115,7 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
rc = -ENOENT;
err_out:
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
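
Taken together, the exported device API above composes into the following driver-side flow; a minimal sketch assuming a kernel driver that already owns an iommufd_ctx and a struct device (example_switch_hwpt and its parameters are illustrative names, not part of this series):

static int example_switch_hwpt(struct iommufd_ctx *ictx, struct device *dev,
			       u32 ioas_id, u32 new_hwpt_id)
{
	struct iommufd_device *idev;
	u32 dev_id, pt_id = ioas_id;
	int rc;

	idev = iommufd_device_bind(ictx, dev, &dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	/* First attachment: pt_id names an IOAS, so an auto hwpt is selected */
	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Later, switch to a manually allocated hwpt without a detach step */
	rc = iommufd_device_replace(idev, &new_hwpt_id);
	if (rc)
		goto out_detach;
	return 0;

out_detach:
	iommufd_device_detach(idev);
out_unbind:
	iommufd_device_unbind(idev);
	return rc;
}

On both the attach and replace paths pt_id is updated to the resulting HW_PAGETABLE ID, which the caller is expected to report back to userspace.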
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 6cdb6749d359..cf2c1504e20d 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -3,6 +3,7 @@
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/iommu.h>
+#include <uapi/linux/iommufd.h>
#include "iommufd_private.h"
@@ -11,8 +12,6 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
struct iommufd_hw_pagetable *hwpt =
container_of(obj, struct iommufd_hw_pagetable, obj);
- WARN_ON(!list_empty(&hwpt->devices));
-
if (!list_empty(&hwpt->hwpt_item)) {
mutex_lock(&hwpt->ioas->mutex);
list_del(&hwpt->hwpt_item);
@@ -25,7 +24,35 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
iommu_domain_free(hwpt->domain);
refcount_dec(&hwpt->ioas->obj.users);
- mutex_destroy(&hwpt->devices_lock);
+}
+
+void iommufd_hw_pagetable_abort(struct iommufd_object *obj)
+{
+ struct iommufd_hw_pagetable *hwpt =
+ container_of(obj, struct iommufd_hw_pagetable, obj);
+
+ /* The ioas->mutex must be held until finalize is called. */
+ lockdep_assert_held(&hwpt->ioas->mutex);
+
+ if (!list_empty(&hwpt->hwpt_item)) {
+ list_del_init(&hwpt->hwpt_item);
+ iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
+ }
+ iommufd_hw_pagetable_destroy(obj);
+}
+
+int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
+{
+ if (hwpt->enforce_cache_coherency)
+ return 0;
+
+ if (hwpt->domain->ops->enforce_cache_coherency)
+ hwpt->enforce_cache_coherency =
+ hwpt->domain->ops->enforce_cache_coherency(
+ hwpt->domain);
+ if (!hwpt->enforce_cache_coherency)
+ return -EINVAL;
+ return 0;
}
/**
@@ -38,6 +65,10 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
* Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
* will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
+ *
+ * The caller must hold the ioas->mutex until after
+ * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
+ * the returned hwpt.
*/
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
@@ -52,9 +83,7 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if (IS_ERR(hwpt))
return hwpt;
- INIT_LIST_HEAD(&hwpt->devices);
INIT_LIST_HEAD(&hwpt->hwpt_item);
- mutex_init(&hwpt->devices_lock);
/* Pairs with iommufd_hw_pagetable_destroy() */
refcount_inc(&ioas->obj.users);
hwpt->ioas = ioas;
@@ -65,7 +94,18 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
goto out_abort;
}
- mutex_lock(&hwpt->devices_lock);
+ /*
+ * Set the coherency mode before we do iopt_table_add_domain() as some
+ * iommus have a per-PTE bit that controls it and need to decide before
+ * doing any maps. It is an iommu driver bug to report
+ * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
+ * a new domain.
+ */
+ if (idev->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (WARN_ON(rc))
+ goto out_abort;
+ }
/*
* immediate_attach exists only to accommodate iommu drivers that cannot
@@ -76,30 +116,64 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if (immediate_attach) {
rc = iommufd_hw_pagetable_attach(hwpt, idev);
if (rc)
- goto out_unlock;
+ goto out_abort;
}
rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain);
if (rc)
goto out_detach;
list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
-
- if (immediate_attach) {
- /* See iommufd_device_do_attach() */
- refcount_inc(&hwpt->obj.users);
- idev->hwpt = hwpt;
- list_add(&idev->devices_item, &hwpt->devices);
- }
-
- mutex_unlock(&hwpt->devices_lock);
return hwpt;
out_detach:
if (immediate_attach)
- iommufd_hw_pagetable_detach(hwpt, idev);
-out_unlock:
- mutex_unlock(&hwpt->devices_lock);
+ iommufd_hw_pagetable_detach(idev);
out_abort:
iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
return ERR_PTR(rc);
}
+
+int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_hwpt_alloc *cmd = ucmd->cmd;
+ struct iommufd_hw_pagetable *hwpt;
+ struct iommufd_device *idev;
+ struct iommufd_ioas *ioas;
+ int rc;
+
+ if (cmd->flags || cmd->__reserved)
+ return -EOPNOTSUPP;
+
+ idev = iommufd_get_device(ucmd, cmd->dev_id);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->pt_id);
+ if (IS_ERR(ioas)) {
+ rc = PTR_ERR(ioas);
+ goto out_put_idev;
+ }
+
+ mutex_lock(&ioas->mutex);
+ hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false);
+ if (IS_ERR(hwpt)) {
+ rc = PTR_ERR(hwpt);
+ goto out_unlock;
+ }
+
+ cmd->out_hwpt_id = hwpt->obj.id;
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ if (rc)
+ goto out_hwpt;
+ iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
+ goto out_unlock;
+
+out_hwpt:
+ iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
+out_unlock:
+ mutex_unlock(&ioas->mutex);
+ iommufd_put_object(&ioas->obj);
+out_put_idev:
+ iommufd_put_object(&idev->obj);
+ return rc;
+}
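
From userspace the new ioctl is driven roughly as below; a sketch assuming the size-prefixed iommu_hwpt_alloc layout in include/uapi/linux/iommufd.h, with dev_id obtained by binding the device (e.g. through the VFIO cdev) and ioas_id from IOMMU_IOAS_ALLOC; the helper name alloc_hwpt is illustrative:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_hwpt(int iommufd, __u32 dev_id, __u32 ioas_id, __u32 *hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,	/* device already bound to this iommufd */
		.pt_id = ioas_id,	/* IOAS the new domain is linked to */
		/* flags and __reserved must be zero, per the kernel check */
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
		return -1;
	*hwpt_id = cmd.out_hwpt_id;
	return 0;
}

The returned hwpt_id can then be passed as pt_id to a device attach or replace operation.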
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 724c4c574241..3a598182b761 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -1158,36 +1158,36 @@ out_unlock:
}
void iopt_remove_access(struct io_pagetable *iopt,
- struct iommufd_access *access)
+ struct iommufd_access *access,
+ u32 iopt_access_list_id)
{
down_write(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
- WARN_ON(xa_erase(&iopt->access_list, access->iopt_access_list_id) !=
- access);
+ WARN_ON(xa_erase(&iopt->access_list, iopt_access_list_id) != access);
WARN_ON(iopt_calculate_iova_alignment(iopt));
up_write(&iopt->iova_rwsem);
up_write(&iopt->domains_rwsem);
}
-/* Narrow the valid_iova_itree to include reserved ranges from a group. */
-int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
- struct device *device,
- struct iommu_group *group,
- phys_addr_t *sw_msi_start)
+/* Narrow the valid_iova_itree to include reserved ranges from a device. */
+int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
+ struct device *dev,
+ phys_addr_t *sw_msi_start)
{
struct iommu_resv_region *resv;
- struct iommu_resv_region *tmp;
- LIST_HEAD(group_resv_regions);
+ LIST_HEAD(resv_regions);
unsigned int num_hw_msi = 0;
unsigned int num_sw_msi = 0;
int rc;
+ if (iommufd_should_fail())
+ return -EINVAL;
+
down_write(&iopt->iova_rwsem);
- rc = iommu_get_group_resv_regions(group, &group_resv_regions);
- if (rc)
- goto out_unlock;
+	/* FIXME: drivers allocate memory but there is no failure propagated */
+ iommu_get_resv_regions(dev, &resv_regions);
- list_for_each_entry(resv, &group_resv_regions, list) {
+ list_for_each_entry(resv, &resv_regions, list) {
if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
continue;
@@ -1199,7 +1199,7 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
}
rc = iopt_reserve_iova(iopt, resv->start,
- resv->length - 1 + resv->start, device);
+ resv->length - 1 + resv->start, dev);
if (rc)
goto out_reserved;
}
@@ -1214,11 +1214,9 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
goto out_free_resv;
out_reserved:
- __iopt_remove_reserved_iova(iopt, device);
+ __iopt_remove_reserved_iova(iopt, dev);
out_free_resv:
- list_for_each_entry_safe(resv, tmp, &group_resv_regions, list)
- kfree(resv);
-out_unlock:
+ iommu_put_resv_regions(dev, &resv_regions);
up_write(&iopt->iova_rwsem);
return rc;
}
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index f9790983699c..15596a08a057 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -17,6 +17,7 @@ struct iommufd_device;
struct iommufd_ctx {
struct file *file;
struct xarray objects;
+ struct xarray groups;
u8 account_mode;
/* Compatibility with VFIO no iommu */
@@ -75,10 +76,9 @@ int iopt_table_add_domain(struct io_pagetable *iopt,
struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
struct iommu_domain *domain);
-int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
- struct device *device,
- struct iommu_group *group,
- phys_addr_t *sw_msi_start);
+int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
+ struct device *dev,
+ phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt,
struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
@@ -119,6 +119,7 @@ enum iommufd_object_type {
#ifdef CONFIG_IOMMUFD_TEST
IOMMUFD_OBJ_SELFTEST,
#endif
+ IOMMUFD_OBJ_MAX,
};
/* Base struct for all objects with a userspace ID handle. */
@@ -260,18 +261,39 @@ struct iommufd_hw_pagetable {
bool msi_cookie : 1;
/* Head at iommufd_ioas::hwpt_list */
struct list_head hwpt_item;
- struct mutex devices_lock;
- struct list_head devices;
};
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
struct iommufd_device *idev, bool immediate_attach);
+int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev);
-void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
+struct iommufd_hw_pagetable *
+iommufd_hw_pagetable_detach(struct iommufd_device *idev);
void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
+void iommufd_hw_pagetable_abort(struct iommufd_object *obj);
+int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
+
+static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
+ struct iommufd_hw_pagetable *hwpt)
+{
+ lockdep_assert_not_held(&hwpt->ioas->mutex);
+ if (hwpt->auto_domain)
+ iommufd_object_deref_user(ictx, &hwpt->obj);
+ else
+ refcount_dec(&hwpt->obj.users);
+}
+
+struct iommufd_group {
+ struct kref ref;
+ struct mutex lock;
+ struct iommufd_ctx *ictx;
+ struct iommu_group *group;
+ struct iommufd_hw_pagetable *hwpt;
+ struct list_head device_list;
+ phys_addr_t sw_msi_start;
+};
/*
* A iommufd_device object represents the binding relationship between a
@@ -281,21 +303,29 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
struct iommufd_device {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
- struct iommufd_hw_pagetable *hwpt;
- /* Head at iommufd_hw_pagetable::devices */
- struct list_head devices_item;
+ struct iommufd_group *igroup;
+ struct list_head group_item;
/* always the physical device */
struct device *dev;
- struct iommu_group *group;
bool enforce_cache_coherency;
};
+static inline struct iommufd_device *
+iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
+{
+ return container_of(iommufd_get_object(ucmd->ictx, id,
+ IOMMUFD_OBJ_DEVICE),
+ struct iommufd_device, obj);
+}
+
void iommufd_device_destroy(struct iommufd_object *obj);
struct iommufd_access {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
struct iommufd_ioas *ioas;
+ struct iommufd_ioas *ioas_unpin;
+ struct mutex ioas_lock;
const struct iommufd_access_ops *ops;
void *data;
unsigned long iova_alignment;
@@ -304,7 +334,8 @@ struct iommufd_access {
int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
- struct iommufd_access *access);
+ struct iommufd_access *access,
+ u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);
#ifdef CONFIG_IOMMUFD_TEST
@@ -314,7 +345,7 @@ extern size_t iommufd_test_memory_limit;
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id, u64 *iova, u32 *flags);
bool iommufd_should_fail(void);
-void __init iommufd_test_init(void);
+int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
#else
@@ -327,8 +358,9 @@ static inline bool iommufd_should_fail(void)
{
return false;
}
-static inline void __init iommufd_test_init(void)
+static inline int __init iommufd_test_init(void)
{
+ return 0;
}
static inline void iommufd_test_exit(void)
{
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index b3d69cca7729..258de2253b61 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -17,6 +17,8 @@ enum {
IOMMU_TEST_OP_ACCESS_PAGES,
IOMMU_TEST_OP_ACCESS_RW,
IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
+ IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
+ IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
};
enum {
@@ -51,8 +53,13 @@ struct iommu_test_cmd {
struct {
__u32 out_stdev_id;
__u32 out_hwpt_id;
+ /* out_idev_id is the standard iommufd_bind object */
+ __u32 out_idev_id;
} mock_domain;
struct {
+ __u32 pt_id;
+ } mock_domain_replace;
+ struct {
__aligned_u64 iova;
__aligned_u64 length;
__aligned_u64 uptr;
@@ -85,6 +92,9 @@ struct iommu_test_cmd {
struct {
__u32 limit;
} memory_limit;
+ struct {
+ __u32 ioas_id;
+ } access_replace_ioas;
};
__u32 last;
};
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 4cf5f73f2708..5f7e9fa45502 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -24,6 +24,7 @@
struct iommufd_object_ops {
void (*destroy)(struct iommufd_object *obj);
+ void (*abort)(struct iommufd_object *obj);
};
static const struct iommufd_object_ops iommufd_object_ops[];
static struct miscdevice vfio_misc_dev;
@@ -32,6 +33,7 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
size_t size,
enum iommufd_object_type type)
{
+ static struct lock_class_key obj_keys[IOMMUFD_OBJ_MAX];
struct iommufd_object *obj;
int rc;
@@ -39,7 +41,15 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
if (!obj)
return ERR_PTR(-ENOMEM);
obj->type = type;
- init_rwsem(&obj->destroy_rwsem);
+ /*
+ * In most cases the destroy_rwsem is obtained with try so it doesn't
+ * interact with lockdep, however on destroy we have to sleep. This
+ * means if we have to destroy an object while holding a get on another
+ * object it triggers lockdep. Using one locking class per object type
+ * is a simple and reasonable way to avoid this.
+ */
+ __init_rwsem(&obj->destroy_rwsem, "iommufd_object::destroy_rwsem",
+ &obj_keys[type]);
refcount_set(&obj->users, 1);
/*
@@ -50,7 +60,7 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
* before calling iommufd_object_finalize().
*/
rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY,
- xa_limit_32b, GFP_KERNEL_ACCOUNT);
+ xa_limit_31b, GFP_KERNEL_ACCOUNT);
if (rc)
goto out_free;
return obj;
@@ -95,7 +105,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
- iommufd_object_ops[obj->type].destroy(obj);
+ if (iommufd_object_ops[obj->type].abort)
+ iommufd_object_ops[obj->type].abort(obj);
+ else
+ iommufd_object_ops[obj->type].destroy(obj);
iommufd_object_abort(ictx, obj);
}
@@ -223,6 +236,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
}
xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
+ xa_init(&ictx->groups);
ictx->file = filp;
filp->private_data = ictx;
return 0;
@@ -258,6 +272,7 @@ static int iommufd_fops_release(struct inode *inode, struct file *filp)
if (WARN_ON(!destroyed))
break;
}
+ WARN_ON(!xa_empty(&ictx->groups));
kfree(ictx);
return 0;
}
@@ -290,6 +305,7 @@ static int iommufd_option(struct iommufd_ucmd *ucmd)
union ucmd_buffer {
struct iommu_destroy destroy;
+ struct iommu_hwpt_alloc hwpt;
struct iommu_ioas_alloc alloc;
struct iommu_ioas_allow_iovas allow_iovas;
struct iommu_ioas_copy ioas_copy;
@@ -321,6 +337,8 @@ struct iommufd_ioctl_op {
}
static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
+ IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
+ __reserved),
IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl,
struct iommu_ioas_alloc, out_ioas_id),
IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas,
@@ -418,6 +436,30 @@ struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_file, IOMMUFD);
/**
+ * iommufd_ctx_from_fd - Acquires a reference to the iommufd context
+ * @fd: File descriptor to obtain the reference from
+ *
+ * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. On success
+ * the caller is responsible to call iommufd_ctx_put().
+ */
+struct iommufd_ctx *iommufd_ctx_from_fd(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (file->f_op != &iommufd_fops) {
+ fput(file);
+ return ERR_PTR(-EBADFD);
+ }
+ /* fget is the same as iommufd_ctx_get() */
+ return file->private_data;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_fd, IOMMUFD);
+
+/**
* iommufd_ctx_put - Put back a reference
* @ictx: Context to put back
*/
@@ -439,6 +481,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
},
[IOMMUFD_OBJ_HW_PAGETABLE] = {
.destroy = iommufd_hw_pagetable_destroy,
+ .abort = iommufd_hw_pagetable_abort,
},
#ifdef CONFIG_IOMMUFD_TEST
[IOMMUFD_OBJ_SELFTEST] = {
@@ -477,8 +520,14 @@ static int __init iommufd_init(void)
if (ret)
goto err_misc;
}
- iommufd_test_init();
+ ret = iommufd_test_init();
+ if (ret)
+ goto err_vfio_misc;
return 0;
+
+err_vfio_misc:
+ if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER))
+ misc_deregister(&vfio_misc_dev);
err_misc:
misc_deregister(&iommu_misc_dev);
return ret;
@@ -499,5 +548,6 @@ module_exit(iommufd_exit);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
#endif
+MODULE_IMPORT_NS(IOMMUFD_INTERNAL);
MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices");
MODULE_LICENSE("GPL");
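
A minimal sketch of the consumer pattern iommufd_ctx_from_fd() is designed for; the helper name example_import_iommufd is illustrative, the real user being the VFIO cdev code added elsewhere in this series:

static int example_import_iommufd(int fd)
{
	struct iommufd_ctx *ictx;

	ictx = iommufd_ctx_from_fd(fd);
	if (IS_ERR(ictx))
		return PTR_ERR(ictx);

	/* ... bind devices / allocate objects against ictx ... */

	/* The fget() reference doubles as the ctx reference; drop it when done */
	iommufd_ctx_put(ictx);
	return 0;
}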
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 74c2076105d4..9223ff3f23f9 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -9,14 +9,17 @@
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fault-inject.h>
+#include <linux/platform_device.h>
#include <uapi/linux/iommufd.h>
+#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"
static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
+static struct platform_device *selftest_iommu_dev;
size_t iommufd_test_memory_limit = 65536;
@@ -135,7 +138,7 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED)
return &mock_blocking_domain;
- if (WARN_ON(iommu_domain_type != IOMMU_DOMAIN_UNMANAGED))
+ if (iommu_domain_type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -276,12 +279,22 @@ static void mock_domain_set_plaform_dma_ops(struct device *dev)
*/
}
+static struct iommu_device mock_iommu_device = {
+};
+
+static struct iommu_device *mock_probe_device(struct device *dev)
+{
+ return &mock_iommu_device;
+}
+
static const struct iommu_ops mock_ops = {
.owner = THIS_MODULE,
.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
.domain_alloc = mock_domain_alloc,
.capable = mock_domain_capable,
.set_platform_dma_ops = mock_domain_set_plaform_dma_ops,
+ .device_group = generic_device_group,
+ .probe_device = mock_probe_device,
.default_domain_ops =
&(struct iommu_domain_ops){
.free = mock_domain_free,
@@ -292,10 +305,6 @@ static const struct iommu_ops mock_ops = {
},
};
-static struct iommu_device mock_iommu_device = {
- .ops = &mock_ops,
-};
-
static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
struct mock_iommu_domain **mock)
@@ -316,22 +325,29 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
return hwpt;
}
-static struct bus_type iommufd_mock_bus_type = {
- .name = "iommufd_mock",
- .iommu_ops = &mock_ops,
+struct mock_bus_type {
+ struct bus_type bus;
+ struct notifier_block nb;
};
+static struct mock_bus_type iommufd_mock_bus_type = {
+ .bus = {
+ .name = "iommufd_mock",
+ },
+};
+
+static atomic_t mock_dev_num;
+
static void mock_dev_release(struct device *dev)
{
struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
+ atomic_dec(&mock_dev_num);
kfree(mdev);
}
static struct mock_dev *mock_dev_create(void)
{
- struct iommu_group *iommu_group;
- struct dev_iommu *dev_iommu;
struct mock_dev *mdev;
int rc;
@@ -341,51 +357,18 @@ static struct mock_dev *mock_dev_create(void)
device_initialize(&mdev->dev);
mdev->dev.release = mock_dev_release;
- mdev->dev.bus = &iommufd_mock_bus_type;
-
- iommu_group = iommu_group_alloc();
- if (IS_ERR(iommu_group)) {
- rc = PTR_ERR(iommu_group);
- goto err_put;
- }
+ mdev->dev.bus = &iommufd_mock_bus_type.bus;
rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
- iommu_group_id(iommu_group));
+ atomic_inc_return(&mock_dev_num));
if (rc)
- goto err_group;
-
- /*
- * The iommu core has no way to associate a single device with an iommu
- * driver (heck currently it can't even support two iommu_drivers
- * registering). Hack it together with an open coded dev_iommu_get().
- * Notice that the normal notifier triggered iommu release process also
- * does not work here because this bus is not in iommu_buses.
- */
- mdev->dev.iommu = kzalloc(sizeof(*dev_iommu), GFP_KERNEL);
- if (!mdev->dev.iommu) {
- rc = -ENOMEM;
- goto err_group;
- }
- mutex_init(&mdev->dev.iommu->lock);
- mdev->dev.iommu->iommu_dev = &mock_iommu_device;
+ goto err_put;
rc = device_add(&mdev->dev);
if (rc)
- goto err_dev_iommu;
-
- rc = iommu_group_add_device(iommu_group, &mdev->dev);
- if (rc)
- goto err_del;
- iommu_group_put(iommu_group);
+ goto err_put;
return mdev;
-err_del:
- device_del(&mdev->dev);
-err_dev_iommu:
- kfree(mdev->dev.iommu);
- mdev->dev.iommu = NULL;
-err_group:
- iommu_group_put(iommu_group);
err_put:
put_device(&mdev->dev);
return ERR_PTR(rc);
@@ -393,11 +376,7 @@ err_put:
static void mock_dev_destroy(struct mock_dev *mdev)
{
- iommu_group_remove_device(&mdev->dev);
- device_del(&mdev->dev);
- kfree(mdev->dev.iommu);
- mdev->dev.iommu = NULL;
- put_device(&mdev->dev);
+ device_unregister(&mdev->dev);
}
bool iommufd_selftest_is_mock_dev(struct device *dev)
@@ -443,9 +422,15 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
/* Userspace must destroy the device_id to destroy the object */
cmd->mock_domain.out_hwpt_id = pt_id;
cmd->mock_domain.out_stdev_id = sobj->obj.id;
+ cmd->mock_domain.out_idev_id = idev_id;
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ if (rc)
+ goto out_detach;
iommufd_object_finalize(ucmd->ictx, &sobj->obj);
- return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ return 0;
+out_detach:
+ iommufd_device_detach(idev);
out_unbind:
iommufd_device_unbind(idev);
out_mdev:
@@ -455,6 +440,42 @@ out_sobj:
return rc;
}
+/* Replace the mock domain with a manually allocated hw_pagetable */
+static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
+ unsigned int device_id, u32 pt_id,
+ struct iommu_test_cmd *cmd)
+{
+ struct iommufd_object *dev_obj;
+ struct selftest_obj *sobj;
+ int rc;
+
+ /*
+ * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
+ * it doesn't race with detach, which is not allowed.
+ */
+ dev_obj =
+ iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
+ if (IS_ERR(dev_obj))
+ return PTR_ERR(dev_obj);
+
+ sobj = container_of(dev_obj, struct selftest_obj, obj);
+ if (sobj->type != TYPE_IDEV) {
+ rc = -EINVAL;
+ goto out_dev_obj;
+ }
+
+ rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
+ if (rc)
+ goto out_dev_obj;
+
+ cmd->mock_domain_replace.pt_id = pt_id;
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+
+out_dev_obj:
+ iommufd_put_object(dev_obj);
+ return rc;
+}
+
/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
unsigned int mockpt_id,
@@ -748,6 +769,22 @@ out_free_staccess:
return rc;
}
+static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
+ unsigned int access_id,
+ unsigned int ioas_id)
+{
+ struct selftest_access *staccess;
+ int rc;
+
+ staccess = iommufd_access_get(access_id);
+ if (IS_ERR(staccess))
+ return PTR_ERR(staccess);
+
+ rc = iommufd_access_replace(staccess->access, ioas_id);
+ fput(staccess->file);
+ return rc;
+}
+
/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
size_t npages)
@@ -948,6 +985,9 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
cmd->add_reserved.length);
case IOMMU_TEST_OP_MOCK_DOMAIN:
return iommufd_test_mock_domain(ucmd, cmd);
+ case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
+ return iommufd_test_mock_domain_replace(
+ ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
case IOMMU_TEST_OP_MD_CHECK_MAP:
return iommufd_test_md_check_pa(
ucmd, cmd->id, cmd->check_map.iova,
@@ -960,6 +1000,9 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
case IOMMU_TEST_OP_CREATE_ACCESS:
return iommufd_test_create_access(ucmd, cmd->id,
cmd->create_access.flags);
+ case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
+ return iommufd_test_access_replace_ioas(
+ ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
case IOMMU_TEST_OP_ACCESS_PAGES:
return iommufd_test_access_pages(
ucmd, cmd->id, cmd->access_pages.iova,
@@ -992,15 +1035,57 @@ bool iommufd_should_fail(void)
return should_fail(&fail_iommufd, 1);
}
-void __init iommufd_test_init(void)
+int __init iommufd_test_init(void)
{
+ struct platform_device_info pdevinfo = {
+ .name = "iommufd_selftest_iommu",
+ };
+ int rc;
+
dbgfs_root =
fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
- WARN_ON(bus_register(&iommufd_mock_bus_type));
+
+ selftest_iommu_dev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(selftest_iommu_dev)) {
+ rc = PTR_ERR(selftest_iommu_dev);
+ goto err_dbgfs;
+ }
+
+ rc = bus_register(&iommufd_mock_bus_type.bus);
+ if (rc)
+ goto err_platform;
+
+ rc = iommu_device_sysfs_add(&mock_iommu_device,
+ &selftest_iommu_dev->dev, NULL, "%s",
+ dev_name(&selftest_iommu_dev->dev));
+ if (rc)
+ goto err_bus;
+
+ rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
+ &iommufd_mock_bus_type.bus,
+ &iommufd_mock_bus_type.nb);
+ if (rc)
+ goto err_sysfs;
+ return 0;
+
+err_sysfs:
+ iommu_device_sysfs_remove(&mock_iommu_device);
+err_bus:
+ bus_unregister(&iommufd_mock_bus_type.bus);
+err_platform:
+ platform_device_del(selftest_iommu_dev);
+err_dbgfs:
+ debugfs_remove_recursive(dbgfs_root);
+ return rc;
}
void iommufd_test_exit(void)
{
+ iommu_device_sysfs_remove(&mock_iommu_device);
+ iommu_device_unregister_bus(&mock_iommu_device,
+ &iommufd_mock_bus_type.bus,
+ &iommufd_mock_bus_type.nb);
+ bus_unregister(&iommufd_mock_bus_type.bus);
+ platform_device_del(selftest_iommu_dev);
debugfs_remove_recursive(dbgfs_root);
- bus_unregister(&iommufd_mock_bus_type);
}
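
The selftest hunks above drop the open-coded dev_iommu hack and instead register a real mock IOMMU driver on a private bus. Condensed to its essentials, and assuming only the iommu_device_register_bus()/iommu_device_unregister_bus() helpers from iommu-priv.h used above, the registration/teardown pairing a mock driver needs looks roughly like this sketch (the platform device and sysfs pieces are left out; mock_ops is the iommu_ops shown in the diff):

#include <linux/device.h>
#include <linux/iommu.h>
#include "../iommu-priv.h"	/* iommu_device_register_bus() and friends */

/* The bus embeds the notifier_block that the register helper needs to
 * watch device add/remove events on this bus. */
struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type mock_bus = {
	.bus = { .name = "iommufd_mock" },
};
static struct iommu_device mock_iommu;	/* ops are supplied at registration */

static int __init mock_iommu_register(void)
{
	int rc = bus_register(&mock_bus.bus);

	if (rc)
		return rc;
	/* Ties mock_ops to the private bus; devices later added to the bus
	 * are probed through mock_ops.probe_device(). */
	rc = iommu_device_register_bus(&mock_iommu, &mock_ops,
				       &mock_bus.bus, &mock_bus.nb);
	if (rc)
		bus_unregister(&mock_bus.bus);
	return rc;
}

static void mock_iommu_unregister(void)
{
	iommu_device_unregister_bus(&mock_iommu, &mock_bus.bus, &mock_bus.nb);
	bus_unregister(&mock_bus.bus);
}
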
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 5b53b94f13c7..cba4971618ff 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -632,6 +632,7 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
struct mdev_driver vfio_ccw_mdev_driver = {
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index b441745b0418..2d3c3a79b687 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1975,6 +1975,7 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
.request = vfio_ap_mdev_request
};
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index aba36f5be4ec..6bda6dbb4878 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -4,6 +4,8 @@ menuconfig VFIO
select IOMMU_API
depends on IOMMUFD || !IOMMUFD
select INTERVAL_TREE
+ select VFIO_GROUP if SPAPR_TCE_IOMMU || IOMMUFD=n
+ select VFIO_DEVICE_CDEV if !VFIO_GROUP
select VFIO_CONTAINER if IOMMUFD=n
help
VFIO provides a framework for secure userspace device drivers.
@@ -12,9 +14,33 @@ menuconfig VFIO
If you don't know what to do here, say N.
if VFIO
+config VFIO_DEVICE_CDEV
+ bool "Support for the VFIO cdev /dev/vfio/devices/vfioX"
+ depends on IOMMUFD && !SPAPR_TCE_IOMMU
+ default !VFIO_GROUP
+ help
+ The VFIO device cdev is another way for userspace to get device
+ access. Userspace gets a device fd by opening the device cdev under
+ /dev/vfio/devices/vfioX, and then binds the device fd to an iommufd
+ to set up a secure DMA context for device access. This interface does
+ not support noiommu.
+
+ If you don't know what to do here, say N.
+
+config VFIO_GROUP
+ bool "Support for the VFIO group /dev/vfio/$group_id"
+ default y
+ help
+ VFIO group support provides the traditional model for accessing
+ devices through VFIO and is used by the majority of userspace
+ applications and drivers making use of VFIO.
+
+ If you don't know what to do here, say Y.
+
config VFIO_CONTAINER
bool "Support for the VFIO container /dev/vfio/vfio"
select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
+ depends on VFIO_GROUP
default y
help
The VFIO container is the classic interface to VFIO for establishing
@@ -36,6 +62,7 @@ endif
config VFIO_NOIOMMU
bool "VFIO No-IOMMU support"
+ depends on VFIO_GROUP
help
VFIO is built on the ability to isolate devices using the IOMMU.
Only with an IOMMU can userspace access to DMA capable devices be
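
The VFIO_DEVICE_CDEV help text above describes the new flow: open the per-device node, then bind it to an iommufd before anything else. A minimal userspace sketch of that bind step, assuming the struct vfio_device_bind_iommufd layout this series adds to <linux/vfio.h>, an iommufd obtained by opening /dev/iommu, and a hypothetical node name vfio0:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int open_and_bind(int iommufd)
{
	struct vfio_device_bind_iommufd bind = {
		.argsz = sizeof(bind),
		.flags = 0,
		.iommufd = iommufd,
	};
	int devfd = open("/dev/vfio/devices/vfio0", O_RDWR);

	if (devfd < 0)
		return -1;
	/* Every other device ioctl/read/write/mmap is rejected until this
	 * bind succeeds and access_granted is set. */
	if (ioctl(devfd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
		close(devfd);
		return -1;
	}
	/* bind.out_devid now holds the iommufd device ID for this device. */
	return devfd;
}
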
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 66f418aef5a9..c82ea032d352 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -2,8 +2,9 @@
obj-$(CONFIG_VFIO) += vfio.o
vfio-y += vfio_main.o \
- group.o \
iova_bitmap.o
+vfio-$(CONFIG_VFIO_DEVICE_CDEV) += device_cdev.o
+vfio-$(CONFIG_VFIO_GROUP) += group.o
vfio-$(CONFIG_IOMMUFD) += iommufd.o
vfio-$(CONFIG_VFIO_CONTAINER) += container.o
vfio-$(CONFIG_VFIO_VIRQFD) += virqfd.o
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
new file mode 100644
index 000000000000..e75da0a70d1f
--- /dev/null
+++ b/drivers/vfio/device_cdev.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Intel Corporation.
+ */
+#include <linux/vfio.h>
+#include <linux/iommufd.h>
+
+#include "vfio.h"
+
+static dev_t device_devt;
+
+void vfio_init_device_cdev(struct vfio_device *device)
+{
+ device->device.devt = MKDEV(MAJOR(device_devt), device->index);
+ cdev_init(&device->cdev, &vfio_device_fops);
+ device->cdev.owner = THIS_MODULE;
+}
+
+/*
+ * device access via the fd opened by this function is blocked until
+ * .open_device() is called successfully during BIND_IOMMUFD.
+ */
+int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
+{
+ struct vfio_device *device = container_of(inode->i_cdev,
+ struct vfio_device, cdev);
+ struct vfio_device_file *df;
+ int ret;
+
+ /* Paired with the put in vfio_device_fops_release() */
+ if (!vfio_device_try_get_registration(device))
+ return -ENODEV;
+
+ df = vfio_allocate_device_file(device);
+ if (IS_ERR(df)) {
+ ret = PTR_ERR(df);
+ goto err_put_registration;
+ }
+
+ filep->private_data = df;
+
+ return 0;
+
+err_put_registration:
+ vfio_device_put_registration(device);
+ return ret;
+}
+
+static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
+{
+ spin_lock(&df->kvm_ref_lock);
+ vfio_device_get_kvm_safe(df->device, df->kvm);
+ spin_unlock(&df->kvm_ref_lock);
+}
+
+long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
+ struct vfio_device_bind_iommufd __user *arg)
+{
+ struct vfio_device *device = df->device;
+ struct vfio_device_bind_iommufd bind;
+ unsigned long minsz;
+ int ret;
+
+ static_assert(__same_type(arg->out_devid, df->devid));
+
+ minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
+
+ if (copy_from_user(&bind, arg, minsz))
+ return -EFAULT;
+
+ if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
+ return -EINVAL;
+
+ /* BIND_IOMMUFD only allowed for cdev fds */
+ if (df->group)
+ return -EINVAL;
+
+ ret = vfio_device_block_group(device);
+ if (ret)
+ return ret;
+
+ mutex_lock(&device->dev_set->lock);
+ /* one device cannot be bound twice */
+ if (df->access_granted) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
+ if (IS_ERR(df->iommufd)) {
+ ret = PTR_ERR(df->iommufd);
+ df->iommufd = NULL;
+ goto out_unlock;
+ }
+
+ /*
+ * Before the device is opened, get the KVM pointer currently
+ * associated with the device file (if any) and obtain a
+ * reference. This reference is held until the device is closed.
+ * Save the pointer in the device for use by drivers.
+ */
+ vfio_df_get_kvm_safe(df);
+
+ ret = vfio_df_open(df);
+ if (ret)
+ goto out_put_kvm;
+
+ ret = copy_to_user(&arg->out_devid, &df->devid,
+ sizeof(df->devid)) ? -EFAULT : 0;
+ if (ret)
+ goto out_close_device;
+
+ device->cdev_opened = true;
+ /*
+ * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
+ * read/write/mmap
+ */
+ smp_store_release(&df->access_granted, true);
+ mutex_unlock(&device->dev_set->lock);
+ return 0;
+
+out_close_device:
+ vfio_df_close(df);
+out_put_kvm:
+ vfio_device_put_kvm(device);
+ iommufd_ctx_put(df->iommufd);
+ df->iommufd = NULL;
+out_unlock:
+ mutex_unlock(&device->dev_set->lock);
+ vfio_device_unblock_group(device);
+ return ret;
+}
+
+void vfio_df_unbind_iommufd(struct vfio_device_file *df)
+{
+ struct vfio_device *device = df->device;
+
+ /*
+ * At the time of close there is no contention with anyone else
+ * changing this flag, so reading df->access_granted without the
+ * lock and without smp_load_acquire() is fine.
+ */
+ if (!df->access_granted)
+ return;
+
+ mutex_lock(&device->dev_set->lock);
+ vfio_df_close(df);
+ vfio_device_put_kvm(device);
+ iommufd_ctx_put(df->iommufd);
+ device->cdev_opened = false;
+ mutex_unlock(&device->dev_set->lock);
+ vfio_device_unblock_group(device);
+}
+
+int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
+ struct vfio_device_attach_iommufd_pt __user *arg)
+{
+ struct vfio_device *device = df->device;
+ struct vfio_device_attach_iommufd_pt attach;
+ unsigned long minsz;
+ int ret;
+
+ minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);
+
+ if (copy_from_user(&attach, arg, minsz))
+ return -EFAULT;
+
+ if (attach.argsz < minsz || attach.flags)
+ return -EINVAL;
+
+ mutex_lock(&device->dev_set->lock);
+ ret = device->ops->attach_ioas(device, &attach.pt_id);
+ if (ret)
+ goto out_unlock;
+
+ if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
+ ret = -EFAULT;
+ goto out_detach;
+ }
+ mutex_unlock(&device->dev_set->lock);
+
+ return 0;
+
+out_detach:
+ device->ops->detach_ioas(device);
+out_unlock:
+ mutex_unlock(&device->dev_set->lock);
+ return ret;
+}
+
+int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
+ struct vfio_device_detach_iommufd_pt __user *arg)
+{
+ struct vfio_device *device = df->device;
+ struct vfio_device_detach_iommufd_pt detach;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);
+
+ if (copy_from_user(&detach, arg, minsz))
+ return -EFAULT;
+
+ if (detach.argsz < minsz || detach.flags)
+ return -EINVAL;
+
+ mutex_lock(&device->dev_set->lock);
+ device->ops->detach_ioas(device);
+ mutex_unlock(&device->dev_set->lock);
+
+ return 0;
+}
+
+static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
+}
+
+int vfio_cdev_init(struct class *device_class)
+{
+ device_class->devnode = vfio_device_devnode;
+ return alloc_chrdev_region(&device_devt, 0,
+ MINORMASK + 1, "vfio-dev");
+}
+
+void vfio_cdev_cleanup(void)
+{
+ unregister_chrdev_region(device_devt, MINORMASK + 1);
+}
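
vfio_df_ioctl_attach_pt() and vfio_df_ioctl_detach_pt() above back the two cdev-only ioctls. Continuing the userspace sketch from the Kconfig section, with an IOAS id assumed to come from IOMMU_IOAS_ALLOC on the same iommufd:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* devfd was already bound with VFIO_DEVICE_BIND_IOMMUFD as sketched above. */
static int attach_then_detach(int devfd, __u32 ioas_id)
{
	struct vfio_device_attach_iommufd_pt attach = {
		.argsz = sizeof(attach),
		.flags = 0,
		.pt_id = ioas_id,
	};
	struct vfio_device_detach_iommufd_pt detach = {
		.argsz = sizeof(detach),
		.flags = 0,
	};

	if (ioctl(devfd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach))
		return -1;
	/* attach.pt_id may have been updated to the page table actually
	 * attached; DMA through the IOAS is live from this point. */

	return ioctl(devfd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach);
}
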
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index f2140e94d41e..116358a8f1cf 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -593,6 +593,7 @@ static const struct vfio_device_ops vfio_fsl_mc_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static struct fsl_mc_driver vfio_fsl_mc_driver = {
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index fc75c1000d74..610a429c6191 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -160,17 +160,13 @@ out_unlock:
static void vfio_device_group_get_kvm_safe(struct vfio_device *device)
{
spin_lock(&device->group->kvm_ref_lock);
- if (!device->group->kvm)
- goto unlock;
-
- _vfio_device_get_kvm_safe(device, device->group->kvm);
-
-unlock:
+ vfio_device_get_kvm_safe(device, device->group->kvm);
spin_unlock(&device->group->kvm_ref_lock);
}
-static int vfio_device_group_open(struct vfio_device *device)
+static int vfio_df_group_open(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
int ret;
mutex_lock(&device->group->group_lock);
@@ -190,24 +186,62 @@ static int vfio_device_group_open(struct vfio_device *device)
if (device->open_count == 0)
vfio_device_group_get_kvm_safe(device);
- ret = vfio_device_open(device, device->group->iommufd);
+ df->iommufd = device->group->iommufd;
+ if (df->iommufd && vfio_device_is_noiommu(device) && device->open_count == 0) {
+ /*
+ * Require no compat ioas to be assigned to proceed. The basic
+ * statement is that the user cannot have done something that
+ * implies they expected translation to exist.
+ */
+ if (!capable(CAP_SYS_RAWIO) ||
+ vfio_iommufd_device_has_compat_ioas(device, df->iommufd))
+ ret = -EPERM;
+ else
+ ret = 0;
+ goto out_put_kvm;
+ }
- if (device->open_count == 0)
- vfio_device_put_kvm(device);
+ ret = vfio_df_open(df);
+ if (ret)
+ goto out_put_kvm;
+
+ if (df->iommufd && device->open_count == 1) {
+ ret = vfio_iommufd_compat_attach_ioas(device, df->iommufd);
+ if (ret)
+ goto out_close_device;
+ }
+
+ /*
+ * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
+ * read/write/mmap and vfio_file_has_device_access()
+ */
+ smp_store_release(&df->access_granted, true);
mutex_unlock(&device->dev_set->lock);
+ mutex_unlock(&device->group->group_lock);
+ return 0;
+out_close_device:
+ vfio_df_close(df);
+out_put_kvm:
+ df->iommufd = NULL;
+ if (device->open_count == 0)
+ vfio_device_put_kvm(device);
+ mutex_unlock(&device->dev_set->lock);
out_unlock:
mutex_unlock(&device->group->group_lock);
return ret;
}
-void vfio_device_group_close(struct vfio_device *device)
+void vfio_df_group_close(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
+
mutex_lock(&device->group->group_lock);
mutex_lock(&device->dev_set->lock);
- vfio_device_close(device, device->group->iommufd);
+ vfio_df_close(df);
+ df->iommufd = NULL;
if (device->open_count == 0)
vfio_device_put_kvm(device);
@@ -218,19 +252,28 @@ void vfio_device_group_close(struct vfio_device *device)
static struct file *vfio_device_open_file(struct vfio_device *device)
{
+ struct vfio_device_file *df;
struct file *filep;
int ret;
- ret = vfio_device_group_open(device);
- if (ret)
+ df = vfio_allocate_device_file(device);
+ if (IS_ERR(df)) {
+ ret = PTR_ERR(df);
goto err_out;
+ }
+
+ df->group = device->group;
+
+ ret = vfio_df_group_open(df);
+ if (ret)
+ goto err_free;
/*
* We can't use anon_inode_getfd() because we need to modify
* the f_mode flags directly to allow more than just ioctls
*/
filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- device, O_RDWR);
+ df, O_RDWR);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
goto err_close_device;
@@ -253,7 +296,9 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
return filep;
err_close_device:
- vfio_device_group_close(device);
+ vfio_df_group_close(df);
+err_free:
+ kfree(df);
err_out:
return ERR_PTR(ret);
}
@@ -357,6 +402,33 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
}
}
+int vfio_device_block_group(struct vfio_device *device)
+{
+ struct vfio_group *group = device->group;
+ int ret = 0;
+
+ mutex_lock(&group->group_lock);
+ if (group->opened_file) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ group->cdev_device_open_cnt++;
+
+out_unlock:
+ mutex_unlock(&group->group_lock);
+ return ret;
+}
+
+void vfio_device_unblock_group(struct vfio_device *device)
+{
+ struct vfio_group *group = device->group;
+
+ mutex_lock(&group->group_lock);
+ group->cdev_device_open_cnt--;
+ mutex_unlock(&group->group_lock);
+}
+
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_group *group =
@@ -379,6 +451,11 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
goto out_unlock;
}
+ if (group->cdev_device_open_cnt) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
/*
* Do we need multiple instances of the group open? Seems not.
*/
@@ -453,6 +530,7 @@ static void vfio_group_release(struct device *dev)
mutex_destroy(&group->device_lock);
mutex_destroy(&group->group_lock);
WARN_ON(group->iommu_group);
+ WARN_ON(group->cdev_device_open_cnt);
ida_free(&vfio.group_ida, MINOR(group->dev.devt));
kfree(group);
}
@@ -604,16 +682,6 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
if (!iommu_group)
return ERR_PTR(-EINVAL);
- /*
- * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
- * restore cache coherency. It has to be checked here because it is only
- * valid for cases where we are using iommu groups.
- */
- if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
- iommu_group_put(iommu_group);
- return ERR_PTR(-EINVAL);
- }
-
mutex_lock(&vfio.group_lock);
group = vfio_group_find_from_iommu(iommu_group);
if (group) {
@@ -745,6 +813,15 @@ bool vfio_device_has_container(struct vfio_device *device)
return device->group->container;
}
+struct vfio_group *vfio_group_from_file(struct file *file)
+{
+ struct vfio_group *group = file->private_data;
+
+ if (file->f_op != &vfio_group_fops)
+ return NULL;
+ return group;
+}
+
/**
* vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
* @file: VFIO group file
@@ -755,13 +832,13 @@ bool vfio_device_has_container(struct vfio_device *device)
*/
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
- struct vfio_group *group = file->private_data;
+ struct vfio_group *group = vfio_group_from_file(file);
struct iommu_group *iommu_group = NULL;
if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
return NULL;
- if (!vfio_file_is_group(file))
+ if (!group)
return NULL;
mutex_lock(&group->group_lock);
@@ -775,33 +852,20 @@ struct iommu_group *vfio_file_iommu_group(struct file *file)
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
/**
- * vfio_file_is_group - True if the file is usable with VFIO aPIS
+ * vfio_file_is_group - True if the file is a vfio group file
* @file: VFIO group file
*/
bool vfio_file_is_group(struct file *file)
{
- return file->f_op == &vfio_group_fops;
+ return vfio_group_from_file(file);
}
EXPORT_SYMBOL_GPL(vfio_file_is_group);
-/**
- * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
- * is always CPU cache coherent
- * @file: VFIO group file
- *
- * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
- * bit in DMA transactions. A return of false indicates that the user has
- * rights to access additional instructions such as wbinvd on x86.
- */
-bool vfio_file_enforced_coherent(struct file *file)
+bool vfio_group_enforced_coherent(struct vfio_group *group)
{
- struct vfio_group *group = file->private_data;
struct vfio_device *device;
bool ret = true;
- if (!vfio_file_is_group(file))
- return true;
-
/*
* If the device does not have IOMMU_CAP_ENFORCE_CACHE_COHERENCY then
* any domain later attached to it will also not support it. If the cap
@@ -819,28 +883,13 @@ bool vfio_file_enforced_coherent(struct file *file)
mutex_unlock(&group->device_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
-/**
- * vfio_file_set_kvm - Link a kvm with VFIO drivers
- * @file: VFIO group file
- * @kvm: KVM to link
- *
- * When a VFIO device is first opened the KVM will be available in
- * device->kvm if one was associated with the group.
- */
-void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
+void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
- struct vfio_group *group = file->private_data;
-
- if (!vfio_file_is_group(file))
- return;
-
spin_lock(&group->kvm_ref_lock);
group->kvm = kvm;
spin_unlock(&group->kvm_ref_lock);
}
-EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
/**
* vfio_file_has_dev - True if the VFIO file is a handle for device
@@ -851,9 +900,9 @@ EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
*/
bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
- struct vfio_group *group = file->private_data;
+ struct vfio_group *group = vfio_group_from_file(file);
- if (!vfio_file_is_group(file))
+ if (!group)
return false;
return group == device->group;
diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c
index 88b00c501015..82eba6966fa5 100644
--- a/drivers/vfio/iommufd.c
+++ b/drivers/vfio/iommufd.c
@@ -10,53 +10,48 @@
MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);
-int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
+bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx)
+{
+ u32 ioas_id;
+
+ return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
+}
+
+int vfio_df_iommufd_bind(struct vfio_device_file *df)
+{
+ struct vfio_device *vdev = df->device;
+ struct iommufd_ctx *ictx = df->iommufd;
+
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
+}
+
+int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx)
{
u32 ioas_id;
- u32 device_id;
int ret;
lockdep_assert_held(&vdev->dev_set->lock);
- if (vfio_device_is_noiommu(vdev)) {
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- /*
- * Require no compat ioas to be assigned to proceed. The basic
- * statement is that the user cannot have done something that
- * implies they expected translation to exist
- */
- if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
- return -EPERM;
+ /* compat noiommu does not need to do ioas attach */
+ if (vfio_device_is_noiommu(vdev))
return 0;
- }
-
- ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
- if (ret)
- return ret;
ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
if (ret)
- goto err_unbind;
- ret = vdev->ops->attach_ioas(vdev, &ioas_id);
- if (ret)
- goto err_unbind;
-
- /*
- * The legacy path has no way to return the device id or the selected
- * pt_id
- */
- return 0;
+ return ret;
-err_unbind:
- if (vdev->ops->unbind_iommufd)
- vdev->ops->unbind_iommufd(vdev);
- return ret;
+ /* The legacy path has no way to return the selected pt_id */
+ return vdev->ops->attach_ioas(vdev, &ioas_id);
}
-void vfio_iommufd_unbind(struct vfio_device *vdev)
+void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
+ struct vfio_device *vdev = df->device;
+
lockdep_assert_held(&vdev->dev_set->lock);
if (vfio_device_is_noiommu(vdev))
@@ -66,6 +61,50 @@ void vfio_iommufd_unbind(struct vfio_device *vdev)
vdev->ops->unbind_iommufd(vdev);
}
+struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
+{
+ if (vdev->iommufd_device)
+ return iommufd_device_to_ictx(vdev->iommufd_device);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);
+
+static int vfio_iommufd_device_id(struct vfio_device *vdev)
+{
+ if (vdev->iommufd_device)
+ return iommufd_device_to_id(vdev->iommufd_device);
+ return -EINVAL;
+}
+
+/*
+ * Return the devid for a device:
+ *  a valid ID if the device is owned by the ictx
+ *  -ENOENT = device is owned but there is no ID
+ *  -ENODEV or other error = device is not owned
+ */
+int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
+{
+ struct iommu_group *group;
+ int devid;
+
+ if (vfio_iommufd_device_ictx(vdev) == ictx)
+ return vfio_iommufd_device_id(vdev);
+
+ group = iommu_group_get(vdev->dev);
+ if (!group)
+ return -ENODEV;
+
+ if (iommufd_ctx_has_group(ictx, group))
+ devid = -ENOENT;
+ else
+ devid = -ENODEV;
+
+ iommu_group_put(group);
+
+ return devid;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);
+
/*
* The physical standard ops mean that the iommufd_device is bound to the
* physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
@@ -101,7 +140,15 @@ int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
int rc;
- rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_device))
+ return -EINVAL;
+
+ if (vdev->iommufd_attached)
+ rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
+ else
+ rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
if (rc)
return rc;
vdev->iommufd_attached = true;
@@ -109,6 +156,18 @@ int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
+void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
+{
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
+ return;
+
+ iommufd_device_detach(vdev->iommufd_device);
+ vdev->iommufd_attached = false;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
+
/*
* The emulated standard ops mean that vfio_device is going to use the
* "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
@@ -164,11 +223,25 @@ int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
lockdep_assert_held(&vdev->dev_set->lock);
if (vdev->iommufd_attached)
- return -EBUSY;
- rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
+ rc = iommufd_access_replace(vdev->iommufd_access, *pt_id);
+ else
+ rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
if (rc)
return rc;
vdev->iommufd_attached = true;
return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
+
+void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
+{
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_access) ||
+ !vdev->iommufd_attached)
+ return;
+
+ iommufd_access_detach(vdev->iommufd_access);
+ vdev->iommufd_attached = false;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);
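
One consequence of the hunks above is that attach_ioas is no longer one-shot: when the device is already attached, both the physical and emulated helpers go through the iommufd replace primitives instead of failing (the emulated path used to return -EBUSY). For a cdev-bound device this means a second VFIO_DEVICE_ATTACH_IOMMUFD_PT with a different pt_id switches page tables in place; a sketch reusing the uapi layout from the earlier examples:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Move an already-attached device to new_pt_id (an IOAS or hw_pagetable on
 * the bound iommufd) without an intermediate detach. Sketch only. */
static int replace_pt(int devfd, __u32 new_pt_id)
{
	struct vfio_device_attach_iommufd_pt attach = {
		.argsz = sizeof(attach),
		.flags = 0,
		.pt_id = new_pt_id,
	};

	return ioctl(devfd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
}
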
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index a117eaf21c14..b2f9778c8366 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1373,6 +1373,7 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
@@ -1391,6 +1392,7 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index d95fd382814c..42ec574a8622 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -1320,6 +1320,7 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static int mlx5vf_pci_probe(struct pci_dev *pdev,
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 29091ee2e984..cb5b7f865d58 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -141,6 +141,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 20d7b69ea6ff..65cbada3ec13 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -27,6 +27,7 @@
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>
+#include <linux/iommufd.h>
#if IS_ENABLED(CONFIG_EEH)
#include <asm/eeh.h>
#endif
@@ -180,7 +181,8 @@ no_mmap:
struct vfio_pci_group_info;
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
- struct vfio_pci_group_info *groups);
+ struct vfio_pci_group_info *groups,
+ struct iommufd_ctx *iommufd_ctx);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -776,29 +778,65 @@ static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
}
struct vfio_pci_fill_info {
- int max;
- int cur;
- struct vfio_pci_dependent_device *devices;
+ struct vfio_pci_dependent_device __user *devices;
+ struct vfio_pci_dependent_device __user *devices_end;
+ struct vfio_device *vdev;
+ u32 count;
+ u32 flags;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
+ struct vfio_pci_dependent_device info = {
+ .segment = pci_domain_nr(pdev->bus),
+ .bus = pdev->bus->number,
+ .devfn = pdev->devfn,
+ };
struct vfio_pci_fill_info *fill = data;
- struct iommu_group *iommu_group;
- if (fill->cur == fill->max)
- return -EAGAIN; /* Something changed, try again */
+ fill->count++;
+ if (fill->devices >= fill->devices_end)
+ return 0;
+
+ if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) {
+ struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev);
+ struct vfio_device_set *dev_set = fill->vdev->dev_set;
+ struct vfio_device *vdev;
+
+ /*
+ * hot-reset requires all affected devices be represented in
+ * the dev_set.
+ */
+ vdev = vfio_find_device_in_devset(dev_set, &pdev->dev);
+ if (!vdev) {
+ info.devid = VFIO_PCI_DEVID_NOT_OWNED;
+ } else {
+ int id = vfio_iommufd_get_dev_id(vdev, iommufd);
+
+ if (id > 0)
+ info.devid = id;
+ else if (id == -ENOENT)
+ info.devid = VFIO_PCI_DEVID_OWNED;
+ else
+ info.devid = VFIO_PCI_DEVID_NOT_OWNED;
+ }
+ /* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */
+ if (info.devid == VFIO_PCI_DEVID_NOT_OWNED)
+ fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
+ } else {
+ struct iommu_group *iommu_group;
- iommu_group = iommu_group_get(&pdev->dev);
- if (!iommu_group)
- return -EPERM; /* Cannot reset non-isolated devices */
+ iommu_group = iommu_group_get(&pdev->dev);
+ if (!iommu_group)
+ return -EPERM; /* Cannot reset non-isolated devices */
- fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
- fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
- fill->devices[fill->cur].bus = pdev->bus->number;
- fill->devices[fill->cur].devfn = pdev->devfn;
- fill->cur++;
- iommu_group_put(iommu_group);
+ info.group_id = iommu_group_id(iommu_group);
+ iommu_group_put(iommu_group);
+ }
+
+ if (copy_to_user(fill->devices, &info, sizeof(info)))
+ return -EFAULT;
+ fill->devices++;
return 0;
}
@@ -1228,8 +1266,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
unsigned long minsz =
offsetofend(struct vfio_pci_hot_reset_info, count);
struct vfio_pci_hot_reset_info hdr;
- struct vfio_pci_fill_info fill = { 0 };
- struct vfio_pci_dependent_device *devices = NULL;
+ struct vfio_pci_fill_info fill = {};
bool slot = false;
int ret = 0;
@@ -1247,78 +1284,42 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
- /* How many devices are affected? */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
- &fill.max, slot);
- if (ret)
- return ret;
-
- WARN_ON(!fill.max); /* Should always be at least one */
-
- /*
- * If there's enough space, fill it now, otherwise return -ENOSPC and
- * the number of devices affected.
- */
- if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
- ret = -ENOSPC;
- hdr.count = fill.max;
- goto reset_info_exit;
- }
-
- devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
- if (!devices)
- return -ENOMEM;
+ fill.devices = arg->devices;
+ fill.devices_end = arg->devices +
+ (hdr.argsz - sizeof(hdr)) / sizeof(arg->devices[0]);
+ fill.vdev = &vdev->vdev;
- fill.devices = devices;
+ if (vfio_device_cdev_opened(&vdev->vdev))
+ fill.flags |= VFIO_PCI_HOT_RESET_FLAG_DEV_ID |
+ VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
+ mutex_lock(&vdev->vdev.dev_set->lock);
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
&fill, slot);
+ mutex_unlock(&vdev->vdev.dev_set->lock);
+ if (ret)
+ return ret;
- /*
- * If a device was removed between counting and filling, we may come up
- * short of fill.max. If a device was added, we'll have a return of
- * -EAGAIN above.
- */
- if (!ret)
- hdr.count = fill.cur;
-
-reset_info_exit:
+ hdr.count = fill.count;
+ hdr.flags = fill.flags;
if (copy_to_user(arg, &hdr, minsz))
- ret = -EFAULT;
-
- if (!ret) {
- if (copy_to_user(&arg->devices, devices,
- hdr.count * sizeof(*devices)))
- ret = -EFAULT;
- }
+ return -EFAULT;
- kfree(devices);
- return ret;
+ if (fill.count > fill.devices - arg->devices)
+ return -ENOSPC;
+ return 0;
}
-static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
- struct vfio_pci_hot_reset __user *arg)
+static int
+vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
+ int array_count, bool slot,
+ struct vfio_pci_hot_reset __user *arg)
{
- unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
- struct vfio_pci_hot_reset hdr;
int32_t *group_fds;
struct file **files;
struct vfio_pci_group_info info;
- bool slot = false;
int file_idx, count = 0, ret = 0;
- if (copy_from_user(&hdr, arg, minsz))
- return -EFAULT;
-
- if (hdr.argsz < minsz || hdr.flags)
- return -EINVAL;
-
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
-
/*
* We can't let userspace give us an arbitrarily large buffer to copy,
* so verify how many we think there could be. Note groups can have
@@ -1329,12 +1330,11 @@ static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
if (ret)
return ret;
- /* Somewhere between 1 and count is OK */
- if (!hdr.count || hdr.count > count)
+ if (array_count > count)
return -EINVAL;
- group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
- files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+ group_fds = kcalloc(array_count, sizeof(*group_fds), GFP_KERNEL);
+ files = kcalloc(array_count, sizeof(*files), GFP_KERNEL);
if (!group_fds || !files) {
kfree(group_fds);
kfree(files);
@@ -1342,18 +1342,17 @@ static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
}
if (copy_from_user(group_fds, arg->group_fds,
- hdr.count * sizeof(*group_fds))) {
+ array_count * sizeof(*group_fds))) {
kfree(group_fds);
kfree(files);
return -EFAULT;
}
/*
- * For each group_fd, get the group through the vfio external user
- * interface and store the group and iommu ID. This ensures the group
- * is held across the reset.
+ * Get the group file for each fd to ensure the group is held across
+ * the reset.
*/
- for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+ for (file_idx = 0; file_idx < array_count; file_idx++) {
struct file *file = fget(group_fds[file_idx]);
if (!file) {
@@ -1377,10 +1376,10 @@ static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
if (ret)
goto hot_reset_release;
- info.count = hdr.count;
+ info.count = array_count;
info.files = files;
- ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
+ ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info, NULL);
hot_reset_release:
for (file_idx--; file_idx >= 0; file_idx--)
@@ -1390,6 +1389,36 @@ hot_reset_release:
return ret;
}
+static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
+ struct vfio_pci_hot_reset hdr;
+ bool slot = false;
+
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
+
+ /* zero-length array is only for cdev opened devices */
+ if (!!hdr.count == vfio_device_cdev_opened(&vdev->vdev))
+ return -EINVAL;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ if (hdr.count)
+ return vfio_pci_ioctl_pci_hot_reset_groups(vdev, hdr.count, slot, arg);
+
+ return vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, NULL,
+ vfio_iommufd_device_ictx(&vdev->vdev));
+}
+
static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
struct vfio_device_ioeventfd __user *arg)
{
@@ -2355,13 +2384,16 @@ const struct pci_error_handlers vfio_pci_core_err_handlers = {
};
EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
-static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
+static bool vfio_dev_in_groups(struct vfio_device *vdev,
struct vfio_pci_group_info *groups)
{
unsigned int i;
+ if (!groups)
+ return false;
+
for (i = 0; i < groups->count; i++)
- if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
+ if (vfio_file_has_dev(groups->files[i], vdev))
return true;
return false;
}
@@ -2369,12 +2401,8 @@ static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
{
struct vfio_device_set *dev_set = data;
- struct vfio_device *cur;
- list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
- if (cur->dev == &pdev->dev)
- return 0;
- return -EBUSY;
+ return vfio_find_device_in_devset(dev_set, &pdev->dev) ? 0 : -ENODEV;
}
/*
@@ -2441,7 +2469,8 @@ unwind:
* get each memory_lock.
*/
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
- struct vfio_pci_group_info *groups)
+ struct vfio_pci_group_info *groups,
+ struct iommufd_ctx *iommufd_ctx)
{
struct vfio_pci_core_device *cur_mem;
struct vfio_pci_core_device *cur_vma;
@@ -2471,11 +2500,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
goto err_unlock;
list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+ bool owned;
+
/*
- * Test whether all the affected devices are contained by the
- * set of groups provided by the user.
+ * Test whether all the affected devices can be reset by the
+ * user.
+ *
+ * If called from a group opened device and the user provides
+ * a set of groups, all the devices in the dev_set should be
+ * contained by the set of groups provided by the user.
+ *
+ * If called from a cdev opened device and the user provides
+ * a zero-length array, all the devices in the dev_set must
+ * be bound to the same iommufd_ctx as the input iommufd_ctx.
+ * If there is any device that has not been bound to any
+ * iommufd_ctx yet, check if its iommu_group has any device
+ * bound to the input iommufd_ctx. Such devices can be
+ * considered owned by the input iommufd_ctx as the device
+ * cannot be owned by another iommufd_ctx when its iommu_group
+ * is owned.
+ *
+ * Otherwise, reset is not allowed.
*/
- if (!vfio_dev_in_groups(cur_vma, groups)) {
+ if (iommufd_ctx) {
+ int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
+ iommufd_ctx);
+
+ owned = (devid > 0 || devid == -ENOENT);
+ } else {
+ owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
+ }
+
+ if (!owned) {
ret = -EINVAL;
goto err_undo;
}
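
For cdev-opened devices the hot-reset uAPI above changes in two ways: VFIO_DEVICE_GET_PCI_HOT_RESET_INFO can now report per-device iommufd IDs (VFIO_PCI_HOT_RESET_FLAG_DEV_ID, with _DEV_ID_OWNED set only when every affected device is owned by the bound iommufd), and VFIO_DEVICE_PCI_HOT_RESET accepts a zero-length group_fds array because ownership is proven through the iommufd binding rather than through group fds. A userspace sketch, assuming the flexible-array layouts from <linux/vfio.h> and a fixed guess of eight dependent devices:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* devfd was opened through the cdev and bound to an iommufd (sketch). */
static int try_hot_reset(int devfd)
{
	struct vfio_pci_hot_reset reset = {
		.argsz = sizeof(reset),
		.count = 0,	/* zero-length array: cdev path only */
	};
	size_t sz = sizeof(struct vfio_pci_hot_reset_info) +
		    8 * sizeof(struct vfio_pci_dependent_device);
	struct vfio_pci_hot_reset_info *info = calloc(1, sz);
	int ret = -1;

	if (!info)
		return -1;
	info->argsz = sz;
	if (ioctl(devfd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info))
		goto out;	/* errno == ENOSPC means 8 slots were too few */
	/* Only attempt the reset when every affected device is owned by
	 * the iommufd this device is bound to. */
	if (!(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED))
		goto out;
	ret = ioctl(devfd, VFIO_DEVICE_PCI_HOT_RESET, &reset);
out:
	free(info);
	return ret;
}
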
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 83fe54015595..6464b3939ebc 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -119,6 +119,7 @@ static const struct vfio_device_ops vfio_amba_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static const struct amba_id pl330_ids[] = {
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index 22a1efca32a8..8cf22fa65baa 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -108,6 +108,7 @@ static const struct vfio_device_ops vfio_platform_ops = {
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static struct platform_driver vfio_platform_driver = {
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index 7b19c621e0e6..307e3f29b527 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -16,14 +16,32 @@ struct iommufd_ctx;
struct iommu_group;
struct vfio_container;
+struct vfio_device_file {
+ struct vfio_device *device;
+ struct vfio_group *group;
+
+ u8 access_granted;
+ u32 devid; /* only valid when iommufd is valid */
+ spinlock_t kvm_ref_lock; /* protect kvm field */
+ struct kvm *kvm;
+ struct iommufd_ctx *iommufd; /* protected by struct vfio_device_set::lock */
+};
+
void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
-int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd);
-void vfio_device_close(struct vfio_device *device,
- struct iommufd_ctx *iommufd);
+int vfio_df_open(struct vfio_device_file *df);
+void vfio_df_close(struct vfio_device_file *df);
+struct vfio_device_file *
+vfio_allocate_device_file(struct vfio_device *device);
extern const struct file_operations vfio_device_fops;
+#ifdef CONFIG_VFIO_NOIOMMU
+extern bool vfio_noiommu __read_mostly;
+#else
+enum { vfio_noiommu = false };
+#endif
+
enum vfio_group_type {
/*
* Physical device with IOMMU backing.
@@ -48,6 +66,7 @@ enum vfio_group_type {
VFIO_NO_IOMMU,
};
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct vfio_group {
struct device dev;
struct cdev cdev;
@@ -74,8 +93,11 @@ struct vfio_group {
struct blocking_notifier_head notifier;
struct iommufd_ctx *iommufd;
spinlock_t kvm_ref_lock;
+ unsigned int cdev_device_open_cnt;
};
+int vfio_device_block_group(struct vfio_device *device);
+void vfio_device_unblock_group(struct vfio_device *device);
int vfio_device_set_group(struct vfio_device *device,
enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
@@ -83,7 +105,10 @@ void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
-void vfio_device_group_close(struct vfio_device *device);
+void vfio_df_group_close(struct vfio_device_file *df);
+struct vfio_group *vfio_group_from_file(struct file *file);
+bool vfio_group_enforced_coherent(struct vfio_group *group);
+void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);
@@ -93,6 +118,82 @@ static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
vdev->group->type == VFIO_NO_IOMMU;
}
+#else
+struct vfio_group;
+
+static inline int vfio_device_block_group(struct vfio_device *device)
+{
+ return 0;
+}
+
+static inline void vfio_device_unblock_group(struct vfio_device *device)
+{
+}
+
+static inline int vfio_device_set_group(struct vfio_device *device,
+ enum vfio_group_type type)
+{
+ return 0;
+}
+
+static inline void vfio_device_remove_group(struct vfio_device *device)
+{
+}
+
+static inline void vfio_device_group_register(struct vfio_device *device)
+{
+}
+
+static inline void vfio_device_group_unregister(struct vfio_device *device)
+{
+}
+
+static inline int vfio_device_group_use_iommu(struct vfio_device *device)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void vfio_device_group_unuse_iommu(struct vfio_device *device)
+{
+}
+
+static inline void vfio_df_group_close(struct vfio_device_file *df)
+{
+}
+
+static inline struct vfio_group *vfio_group_from_file(struct file *file)
+{
+ return NULL;
+}
+
+static inline bool vfio_group_enforced_coherent(struct vfio_group *group)
+{
+ return true;
+}
+
+static inline void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
+{
+}
+
+static inline bool vfio_device_has_container(struct vfio_device *device)
+{
+ return false;
+}
+
+static inline int __init vfio_group_init(void)
+{
+ return 0;
+}
+
+static inline void vfio_group_cleanup(void)
+{
+}
+
+static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
+{
+ return false;
+}
+#endif /* CONFIG_VFIO_GROUP */
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/**
@@ -217,20 +318,109 @@ static inline void vfio_container_cleanup(void)
#endif
#if IS_ENABLED(CONFIG_IOMMUFD)
-int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
-void vfio_iommufd_unbind(struct vfio_device *device);
+bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
+ struct iommufd_ctx *ictx);
+int vfio_df_iommufd_bind(struct vfio_device_file *df);
+void vfio_df_iommufd_unbind(struct vfio_device_file *df);
+int vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
+ struct iommufd_ctx *ictx);
#else
-static inline int vfio_iommufd_bind(struct vfio_device *device,
+static inline bool
+vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
struct iommufd_ctx *ictx)
{
+ return false;
+}
+
+static inline int vfio_df_iommufd_bind(struct vfio_device_file *fd)
+{
return -EOPNOTSUPP;
}
-static inline void vfio_iommufd_unbind(struct vfio_device *device)
+static inline void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
}
+
+static inline int
+vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
+ struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
#endif
+int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
+ struct vfio_device_attach_iommufd_pt __user *arg);
+int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
+ struct vfio_device_detach_iommufd_pt __user *arg);
+
+#if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
+void vfio_init_device_cdev(struct vfio_device *device);
+
+static inline int vfio_device_add(struct vfio_device *device)
+{
+ /* cdev does not support noiommu device */
+ if (vfio_device_is_noiommu(device))
+ return device_add(&device->device);
+ vfio_init_device_cdev(device);
+ return cdev_device_add(&device->cdev, &device->device);
+}
+
+static inline void vfio_device_del(struct vfio_device *device)
+{
+ if (vfio_device_is_noiommu(device))
+ device_del(&device->device);
+ else
+ cdev_device_del(&device->cdev, &device->device);
+}
+
+int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep);
+long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
+ struct vfio_device_bind_iommufd __user *arg);
+void vfio_df_unbind_iommufd(struct vfio_device_file *df);
+int vfio_cdev_init(struct class *device_class);
+void vfio_cdev_cleanup(void);
+#else
+static inline void vfio_init_device_cdev(struct vfio_device *device)
+{
+}
+
+static inline int vfio_device_add(struct vfio_device *device)
+{
+ return device_add(&device->device);
+}
+
+static inline void vfio_device_del(struct vfio_device *device)
+{
+ device_del(&device->device);
+}
+
+static inline int vfio_device_fops_cdev_open(struct inode *inode,
+ struct file *filep)
+{
+ return 0;
+}
+
+static inline long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
+ struct vfio_device_bind_iommufd __user *arg)
+{
+ return -ENOTTY;
+}
+
+static inline void vfio_df_unbind_iommufd(struct vfio_device_file *df)
+{
+}
+
+static inline int vfio_cdev_init(struct class *device_class)
+{
+ return 0;
+}
+
+static inline void vfio_cdev_cleanup(void)
+{
+}
+#endif /* CONFIG_VFIO_DEVICE_CDEV */
+
#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
@@ -244,18 +434,12 @@ static inline void vfio_virqfd_exit(void)
}
#endif
-#ifdef CONFIG_VFIO_NOIOMMU
-extern bool vfio_noiommu __read_mostly;
-#else
-enum { vfio_noiommu = false };
-#endif
-
#ifdef CONFIG_HAVE_KVM
-void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
+void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
void vfio_device_put_kvm(struct vfio_device *device);
#else
-static inline void _vfio_device_get_kvm_safe(struct vfio_device *device,
- struct kvm *kvm)
+static inline void vfio_device_get_kvm_safe(struct vfio_device *device,
+ struct kvm *kvm)
{
}
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index f0ca33b2e1df..0da8ed81a97d 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -141,6 +141,21 @@ unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);
+struct vfio_device *
+vfio_find_device_in_devset(struct vfio_device_set *dev_set,
+ struct device *dev)
+{
+ struct vfio_device *cur;
+
+ lockdep_assert_held(&dev_set->lock);
+
+ list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
+ if (cur->dev == dev)
+ return cur;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vfio_find_device_in_devset);
+
/*
* Device objects - create, release, get, put, search
*/
@@ -258,7 +273,8 @@ static int __vfio_register_dev(struct vfio_device *device,
if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) &&
(!device->ops->bind_iommufd ||
!device->ops->unbind_iommufd ||
- !device->ops->attach_ioas)))
+ !device->ops->attach_ioas ||
+ !device->ops->detach_ioas)))
return -EINVAL;
/*
@@ -276,7 +292,18 @@ static int __vfio_register_dev(struct vfio_device *device,
if (ret)
return ret;
- ret = device_add(&device->device);
+ /*
+ * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+ * restore cache coherency. It has to be checked here because it is only
+ * valid for cases where we are using iommu groups.
+ */
+ if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) &&
+ !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = vfio_device_add(device);
if (ret)
goto err_out;
@@ -316,6 +343,18 @@ void vfio_unregister_group_dev(struct vfio_device *device)
bool interrupted = false;
long rc;
+ /*
+ * Prevent the device from being newly opened by userspace via
+ * VFIO_GROUP_GET_DEVICE_FD in the group path.
+ */
+ vfio_device_group_unregister(device);
+
+ /*
+ * Balances vfio_device_add() in the register path and also prevents
+ * the device from being newly opened by userspace in the cdev path.
+ */
+ vfio_device_del(device);
+
vfio_device_put_registration(device);
rc = try_wait_for_completion(&device->comp);
while (rc <= 0) {
@@ -339,18 +378,13 @@ void vfio_unregister_group_dev(struct vfio_device *device)
}
}
- vfio_device_group_unregister(device);
-
- /* Balances device_add in register path */
- device_del(&device->device);
-
/* Balances vfio_device_set_group in register path */
vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
#ifdef CONFIG_HAVE_KVM
-void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
+void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{
void (*pfn)(struct kvm *kvm);
bool (*fn)(struct kvm *kvm);
@@ -358,6 +392,9 @@ void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
lockdep_assert_held(&device->dev_set->lock);
+ if (!kvm)
+ return;
+
pfn = symbol_get(kvm_put_kvm);
if (WARN_ON(!pfn))
return;
@@ -404,9 +441,25 @@ static bool vfio_assert_device_open(struct vfio_device *device)
return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}
-static int vfio_device_first_open(struct vfio_device *device,
- struct iommufd_ctx *iommufd)
+struct vfio_device_file *
+vfio_allocate_device_file(struct vfio_device *device)
{
+ struct vfio_device_file *df;
+
+ df = kzalloc(sizeof(*df), GFP_KERNEL_ACCOUNT);
+ if (!df)
+ return ERR_PTR(-ENOMEM);
+
+ df->device = device;
+ spin_lock_init(&df->kvm_ref_lock);
+
+ return df;
+}
+
+static int vfio_df_device_first_open(struct vfio_device_file *df)
+{
+ struct vfio_device *device = df->device;
+ struct iommufd_ctx *iommufd = df->iommufd;
int ret;
lockdep_assert_held(&device->dev_set->lock);
@@ -415,7 +468,7 @@ static int vfio_device_first_open(struct vfio_device *device,
return -ENODEV;
if (iommufd)
- ret = vfio_iommufd_bind(device, iommufd);
+ ret = vfio_df_iommufd_bind(df);
else
ret = vfio_device_group_use_iommu(device);
if (ret)
@@ -430,7 +483,7 @@ static int vfio_device_first_open(struct vfio_device *device,
err_unuse_iommu:
if (iommufd)
- vfio_iommufd_unbind(device);
+ vfio_df_iommufd_unbind(df);
else
vfio_device_group_unuse_iommu(device);
err_module_put:
@@ -438,29 +491,39 @@ err_module_put:
return ret;
}
-static void vfio_device_last_close(struct vfio_device *device,
- struct iommufd_ctx *iommufd)
+static void vfio_df_device_last_close(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
+ struct iommufd_ctx *iommufd = df->iommufd;
+
lockdep_assert_held(&device->dev_set->lock);
if (device->ops->close_device)
device->ops->close_device(device);
if (iommufd)
- vfio_iommufd_unbind(device);
+ vfio_df_iommufd_unbind(df);
else
vfio_device_group_unuse_iommu(device);
module_put(device->dev->driver->owner);
}
-int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd)
+int vfio_df_open(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
int ret = 0;
lockdep_assert_held(&device->dev_set->lock);
+ /*
+ * Only the group path allows the device to be opened multiple
+ * times. The device cdev path does not provide a secure way to do so.
+ */
+ if (device->open_count != 0 && !df->group)
+ return -EINVAL;
+
device->open_count++;
if (device->open_count == 1) {
- ret = vfio_device_first_open(device, iommufd);
+ ret = vfio_df_device_first_open(df);
if (ret)
device->open_count--;
}
@@ -468,14 +531,15 @@ int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd)
return ret;
}
-void vfio_device_close(struct vfio_device *device,
- struct iommufd_ctx *iommufd)
+void vfio_df_close(struct vfio_device_file *df)
{
+ struct vfio_device *device = df->device;
+
lockdep_assert_held(&device->dev_set->lock);
vfio_assert_device_open(device);
if (device->open_count == 1)
- vfio_device_last_close(device, iommufd);
+ vfio_df_device_last_close(df);
device->open_count--;
}
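As orientation for the lockdep_assert_held() checks above, a rough sketch of how a caller pairs vfio_df_open() and vfio_df_close() under the device set mutex; example_use_device() is hypothetical:

#include <linux/mutex.h>

/* Hypothetical caller holding dev_set->lock across open/close, as asserted. */
static int example_use_device(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	int ret;

	mutex_lock(&device->dev_set->lock);
	ret = vfio_df_open(df);		/* first open binds iommufd or group */
	mutex_unlock(&device->dev_set->lock);
	if (ret)
		return ret;

	/* ... device is usable here ... */

	mutex_lock(&device->dev_set->lock);
	vfio_df_close(df);		/* last close undoes the first-open work */
	mutex_unlock(&device->dev_set->lock);
	return 0;
}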
@@ -517,12 +581,18 @@ static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
*/
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
- vfio_device_group_close(device);
+ if (df->group)
+ vfio_df_group_close(df);
+ else
+ vfio_df_unbind_iommufd(df);
vfio_device_put_registration(device);
+ kfree(df);
+
return 0;
}
@@ -1087,16 +1157,38 @@ static int vfio_ioctl_device_feature(struct vfio_device *device,
static long vfio_device_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+ void __user *uptr = (void __user *)arg;
int ret;
+ if (cmd == VFIO_DEVICE_BIND_IOMMUFD)
+ return vfio_df_ioctl_bind_iommufd(df, uptr);
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
+
ret = vfio_device_pm_runtime_get(device);
if (ret)
return ret;
+ /* cdev only ioctls */
+ if (IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV) && !df->group) {
+ switch (cmd) {
+ case VFIO_DEVICE_ATTACH_IOMMUFD_PT:
+ ret = vfio_df_ioctl_attach_pt(df, uptr);
+ goto out;
+
+ case VFIO_DEVICE_DETACH_IOMMUFD_PT:
+ ret = vfio_df_ioctl_detach_pt(df, uptr);
+ goto out;
+ }
+ }
+
switch (cmd) {
case VFIO_DEVICE_FEATURE:
- ret = vfio_ioctl_device_feature(device, (void __user *)arg);
+ ret = vfio_ioctl_device_feature(device, uptr);
break;
default:
@@ -1106,7 +1198,7 @@ static long vfio_device_fops_unl_ioctl(struct file *filep,
ret = device->ops->ioctl(device, cmd, arg);
break;
}
-
+out:
vfio_device_pm_runtime_put(device);
return ret;
}
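Since every fops handler below repeats the same check, here is a minimal sketch of the access_granted handshake the "Paired with smp_store_release()" comments refer to; the store side is shown for illustration only, as the actual call sites live in the group and cdev open paths rather than in this hunk:

#include <linux/atomic.h>	/* smp_store_release()/smp_load_acquire() */

/* Open path: publish a fully initialized vfio_device_file to other threads. */
static void example_grant_access(struct vfio_device_file *df)
{
	/* Everything written before this store is visible once it reads true. */
	smp_store_release(&df->access_granted, true);
}

/* fops path: refuse service until the open path has published the file. */
static bool example_access_ok(struct vfio_device_file *df)
{
	return smp_load_acquire(&df->access_granted);
}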
@@ -1114,7 +1206,12 @@ static long vfio_device_fops_unl_ioctl(struct file *filep,
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
size_t count, loff_t *ppos)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
if (unlikely(!device->ops->read))
return -EINVAL;
@@ -1126,7 +1223,12 @@ static ssize_t vfio_device_fops_write(struct file *filep,
const char __user *buf,
size_t count, loff_t *ppos)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
if (unlikely(!device->ops->write))
return -EINVAL;
@@ -1136,7 +1238,12 @@ static ssize_t vfio_device_fops_write(struct file *filep,
static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
- struct vfio_device *device = filep->private_data;
+ struct vfio_device_file *df = filep->private_data;
+ struct vfio_device *device = df->device;
+
+ /* Paired with smp_store_release() following vfio_df_open() */
+ if (!smp_load_acquire(&df->access_granted))
+ return -EINVAL;
if (unlikely(!device->ops->mmap))
return -EINVAL;
@@ -1146,6 +1253,7 @@ static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
const struct file_operations vfio_device_fops = {
.owner = THIS_MODULE,
+ .open = vfio_device_fops_cdev_open,
.release = vfio_device_fops_release,
.read = vfio_device_fops_read,
.write = vfio_device_fops_write,
@@ -1154,6 +1262,88 @@ const struct file_operations vfio_device_fops = {
.mmap = vfio_device_fops_mmap,
};
+static struct vfio_device *vfio_device_from_file(struct file *file)
+{
+ struct vfio_device_file *df = file->private_data;
+
+ if (file->f_op != &vfio_device_fops)
+ return NULL;
+ return df->device;
+}
+
+/**
+ * vfio_file_is_valid - True if the file is a valid vfio file
+ * @file: VFIO group file or VFIO device file
+ */
+bool vfio_file_is_valid(struct file *file)
+{
+ return vfio_group_from_file(file) ||
+ vfio_device_from_file(file);
+}
+EXPORT_SYMBOL_GPL(vfio_file_is_valid);
+
+/**
+ * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
+ * is always CPU cache coherent
+ * @file: VFIO group file or VFIO device file
+ *
+ * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
+ * bit in DMA transactions. A return of false indicates that the user is
+ * allowed to use additional instructions such as wbinvd on x86.
+ */
+bool vfio_file_enforced_coherent(struct file *file)
+{
+ struct vfio_device *device;
+ struct vfio_group *group;
+
+ group = vfio_group_from_file(file);
+ if (group)
+ return vfio_group_enforced_coherent(group);
+
+ device = vfio_device_from_file(file);
+ if (device)
+ return device_iommu_capable(device->dev,
+ IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
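A hedged sketch of how a consumer of this export might act on the result, following the kernel-doc above; example_user_needs_wbinvd() is a made-up helper, not an existing API:

/* Hypothetical consumer: can the user defeat CPU cache coherency via DMA? */
static bool example_user_needs_wbinvd(struct file *vfio_file)
{
	if (!vfio_file_is_valid(vfio_file))
		return false;

	/*
	 * If coherency is not enforced, no-snoop DMA is possible and the
	 * user may legitimately need cache-management instructions.
	 */
	return !vfio_file_enforced_coherent(vfio_file);
}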
+
+static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
+{
+ struct vfio_device_file *df = file->private_data;
+
+ /*
+ * The kvm is first recorded in the vfio_device_file, and will
+ * be propagated to vfio_device::kvm when the file is bound to
+ * iommufd successfully in the vfio device cdev path.
+ */
+ spin_lock(&df->kvm_ref_lock);
+ df->kvm = kvm;
+ spin_unlock(&df->kvm_ref_lock);
+}
+
+/**
+ * vfio_file_set_kvm - Link a kvm with VFIO drivers
+ * @file: VFIO group file or VFIO device file
+ * @kvm: KVM to link
+ *
+ * When a VFIO device is first opened, the KVM will be available in
+ * device->kvm if one was associated with the file.
+ */
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
+{
+ struct vfio_group *group;
+
+ group = vfio_group_from_file(file);
+ if (group)
+ vfio_group_set_kvm(group, kvm);
+
+ if (vfio_device_from_file(file))
+ vfio_device_file_set_kvm(file, kvm);
+}
+EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
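And a small sketch of how the two file-based exports compose for a caller that only holds a file descriptor; example_link_kvm() is hypothetical and accepts either a group fd or a device cdev fd:

#include <linux/file.h>

/* Hypothetical caller: resolve an fd and link it with a KVM instance. */
static int example_link_kvm(int fd, struct kvm *kvm)
{
	struct fd f = fdget(fd);
	int ret = -EINVAL;

	if (!f.file)
		return -EBADF;

	if (vfio_file_is_valid(f.file)) {
		vfio_file_set_kvm(f.file, kvm);	/* group or device file */
		ret = 0;
	}

	fdput(f);
	return ret;
}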
+
/*
* Sub-module support
*/
@@ -1293,6 +1483,8 @@ int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
/* group->container cannot change while a vfio device is open */
if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
return -EINVAL;
+ if (!device->ops->dma_unmap)
+ return -EINVAL;
if (vfio_device_has_container(device))
return vfio_device_container_pin_pages(device, iova,
npage, prot, pages);
@@ -1330,6 +1522,8 @@ void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
if (WARN_ON(!vfio_assert_device_open(device)))
return;
+ if (WARN_ON(!device->ops->dma_unmap))
+ return;
if (vfio_device_has_container(device)) {
vfio_device_container_unpin_pages(device, iova, npage);
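To make the new requirement concrete, a hedged sketch of a vfio_device_ops table for a driver that pins pages: with this change, such a driver must also wire up .dma_unmap so unmap notifications reach it; the example_* names are placeholders:

/* Hypothetical mdev-style driver: pinning pages now requires .dma_unmap. */
static void example_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
	/* Drop any pins covering [iova, iova + length) before returning. */
}

static const struct vfio_device_ops example_dev_ops = {
	.name		= "example-mdev",
	/*
	 * bind_iommufd/unbind_iommufd/attach_ioas/detach_ioas are also
	 * required, per the registration-time WARN_ON() earlier in this patch.
	 */
	.dma_unmap	= example_dma_unmap,
};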
@@ -1415,9 +1609,16 @@ static int __init vfio_init(void)
goto err_dev_class;
}
+ ret = vfio_cdev_init(vfio.device_class);
+ if (ret)
+ goto err_alloc_dev_chrdev;
+
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
+err_alloc_dev_chrdev:
+ class_destroy(vfio.device_class);
+ vfio.device_class = NULL;
err_dev_class:
vfio_virqfd_exit();
err_virqfd:
@@ -1428,6 +1629,7 @@ err_virqfd:
static void __exit vfio_cleanup(void)
{
ida_destroy(&vfio.device_ida);
+ vfio_cdev_cleanup();
class_destroy(vfio.device_class);
vfio.device_class = NULL;
vfio_virqfd_exit();