author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-08 11:42:23 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-08 11:42:23 -0700
commit		4e3a16ee9148e966678bbc713579235422271a63 (patch)
tree		360e31fbfae5e1a6c385ff01919f61b32d74ed17 /drivers/misc
parent		9413b9a690ec8aeaedea74bb875079d36f295304 (diff)
parent		431275afdc7155415254aef4bd3816a1b8a2ead0 (diff)
Merge tag 'iommu-updates-v5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
 "A big part of this is a change in how devices get connected to IOMMUs
  in the core code. It contains the change from the old add_device() /
  remove_device() to the new probe_device() / release_device()
  call-backs. As a result functionality that was previously in the
  IOMMU drivers has been moved to the IOMMU core code, including IOMMU
  group allocation for each device. The reason for this change was to
  get more robust allocation of default domains for the iommu groups.

  A couple of fixes were necessary after this was merged into the IOMMU
  tree, but there are no known bugs left. The last fix is applied
  on-top of the merge commit for the topic branches.

  Other than that change, we have:

   - Removal of the driver private domain handling in the Intel VT-d
     driver. This was fragile code and I am glad it is gone now.

   - More Intel VT-d updates from Lu Baolu:
      - Nested Shared Virtual Addressing (SVA) support to the Intel
        VT-d driver
      - Replacement of the Intel SVM interfaces to the common IOMMU
        SVA API
      - SVA Page Request draining support

   - ARM-SMMU Updates from Will:
      - Avoid mapping reserved MMIO space on SMMUv3, so that it can be
        claimed by the PMU driver
      - Use xarray to manage ASIDs on SMMUv3
      - Reword confusing shutdown message
      - DT compatible string updates
      - Allow implementations to override the default domain type

   - A new IOMMU driver for the Allwinner Sun50i platform

   - Support for ATS gets disabled for untrusted devices (like
     Thunderbolt devices). This includes a PCI patch, acked by Bjorn.

   - Some cleanups to the AMD IOMMU driver to make more use of IOMMU
     core features.

   - Unification of some printk formats in the Intel and AMD IOMMU
     drivers and in the IOVA code.

   - Updates for DT bindings

   - A number of smaller fixes and cleanups."

* tag 'iommu-updates-v5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (109 commits)
  iommu: Check for deferred attach in iommu_group_do_dma_attach()
  iommu/amd: Remove redundant devid checks
  iommu/amd: Store dev_data as device iommu private data
  iommu/amd: Merge private header files
  iommu/amd: Remove PD_DMA_OPS_MASK
  iommu/amd: Consolidate domain allocation/freeing
  iommu/amd: Free page-table in protection_domain_free()
  iommu/amd: Allocate page-table in protection_domain_init()
  iommu/amd: Let free_pagetable() not rely on domain->pt_root
  iommu/amd: Unexport get_dev_data()
  iommu/vt-d: Fix compile warning
  iommu/vt-d: Remove real DMA lookup in find_domain
  iommu/vt-d: Allocate domain info for real DMA sub-devices
  iommu/vt-d: Only clear real DMA device's context entries
  iommu: Remove iommu_sva_ops::mm_exit()
  uacce: Remove mm_exit() op
  iommu/sun50i: Constify sun50i_iommu_ops
  iommu/hyper-v: Constify hyperv_ir_domain_ops
  iommu/vt-d: Use pci_ats_supported()
  iommu/arm-smmu-v3: Use pci_ats_supported()
  ...
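The headline change swaps the per-driver add_device()/remove_device() hooks in struct iommu_ops for probe_device()/release_device(), with IOMMU group allocation moving into the core. A minimal sketch of what an IOMMU driver implements after this change, assuming the v5.8 callback signatures; the mydrv_* names and the drvdata lookup are hypothetical, not from any in-tree driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

struct mydrv_iommu {
	struct iommu_device iommu_dev;	/* registered via iommu_device_register() */
};

/* Hypothetical: map a client device to the IOMMU instance serving it. */
static struct mydrv_iommu *mydrv_find_iommu(struct device *dev)
{
	return dev_get_drvdata(dev->parent);	/* illustrative only */
}

static struct iommu_device *mydrv_probe_device(struct device *dev)
{
	struct mydrv_iommu *iommu = mydrv_find_iommu(dev);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	/*
	 * The core now allocates the IOMMU group and the default domain;
	 * the driver only reports which IOMMU instance serves the device.
	 */
	return &iommu->iommu_dev;
}

static void mydrv_release_device(struct device *dev)
{
	/* Undo any per-device setup done in mydrv_probe_device(). */
}

static const struct iommu_ops mydrv_ops = {
	.probe_device	= mydrv_probe_device,
	.release_device	= mydrv_release_device,
	/* ... remaining callbacks unchanged ... */
};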
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/uacce/uacce.c | 172
1 file changed, 42 insertions(+), 130 deletions(-)
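The diff below is the uacce side of "iommu: Remove iommu_sva_ops::mm_exit()" / "uacce: Remove mm_exit() op" from the shortlog above: instead of tracking mm_structs in a shared uacce_mm object, each open queue now holds its own SVA bond. A minimal standalone sketch of that bind/unbind pattern, using only the iommu_sva_* calls visible in the diff; the my_* wrappers are hypothetical:

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>	/* current */

/* Bind the calling process's address space to the device and fetch the
 * PASID that tags this mm in the device's DMA transactions. */
static struct iommu_sva *my_bind_current_mm(struct device *dev, int *pasid)
{
	struct iommu_sva *handle;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return handle;

	*pasid = iommu_sva_get_pasid(handle);
	if (*pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return ERR_PTR(-ENODEV);
	}

	return handle;
}

/* Drop the bond; safe to call with a NULL handle. */
static void my_unbind(struct iommu_sva *handle)
{
	if (handle)
		iommu_sva_unbind_device(handle);
}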
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index d39307f060bd..107028e77ca3 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
 }
 #endif
 
-static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
-			  void *data)
+static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
 {
-	struct uacce_mm *uacce_mm = data;
-	struct uacce_queue *q;
-
-	/*
-	 * No new queue can be added concurrently because no caller can have a
-	 * reference to this mm. But there may be concurrent calls to
-	 * uacce_mm_put(), so we need the lock.
-	 */
-	mutex_lock(&uacce_mm->lock);
-	list_for_each_entry(q, &uacce_mm->queues, list)
-		uacce_put_queue(q);
-	uacce_mm->mm = NULL;
-	mutex_unlock(&uacce_mm->lock);
+	int pasid;
+	struct iommu_sva *handle;
 
-	return 0;
-}
-
-static struct iommu_sva_ops uacce_sva_ops = {
-	.mm_exit = uacce_sva_exit,
-};
-
-static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
-				     struct uacce_queue *q,
-				     struct mm_struct *mm)
-{
-	struct uacce_mm *uacce_mm = NULL;
-	struct iommu_sva *handle = NULL;
-	int ret;
-
-	lockdep_assert_held(&uacce->mm_lock);
-
-	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-		if (uacce_mm->mm == mm) {
-			mutex_lock(&uacce_mm->lock);
-			list_add(&q->list, &uacce_mm->queues);
-			mutex_unlock(&uacce_mm->lock);
-			return uacce_mm;
-		}
-	}
-
-	uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
-	if (!uacce_mm)
-		return NULL;
+	if (!(uacce->flags & UACCE_DEV_SVA))
+		return 0;
 
-	if (uacce->flags & UACCE_DEV_SVA) {
-		/*
-		 * Safe to pass an incomplete uacce_mm, since mm_exit cannot
-		 * fire while we hold a reference to the mm.
-		 */
-		handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
-		if (IS_ERR(handle))
-			goto err_free;
+	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 
-		ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
-		if (ret)
-			goto err_unbind;
-
-		uacce_mm->pasid = iommu_sva_get_pasid(handle);
-		if (uacce_mm->pasid == IOMMU_PASID_INVALID)
-			goto err_unbind;
+	pasid = iommu_sva_get_pasid(handle);
+	if (pasid == IOMMU_PASID_INVALID) {
+		iommu_sva_unbind_device(handle);
+		return -ENODEV;
 	}
 
-	uacce_mm->mm = mm;
-	uacce_mm->handle = handle;
-	INIT_LIST_HEAD(&uacce_mm->queues);
-	mutex_init(&uacce_mm->lock);
-	list_add(&q->list, &uacce_mm->queues);
-	list_add(&uacce_mm->list, &uacce->mm_list);
-
-	return uacce_mm;
-
-err_unbind:
-	if (handle)
-		iommu_sva_unbind_device(handle);
-err_free:
-	kfree(uacce_mm);
-	return NULL;
+	q->handle = handle;
+	q->pasid = pasid;
+	return 0;
 }
 
-static void uacce_mm_put(struct uacce_queue *q)
+static void uacce_unbind_queue(struct uacce_queue *q)
 {
-	struct uacce_mm *uacce_mm = q->uacce_mm;
-
-	lockdep_assert_held(&q->uacce->mm_lock);
-
-	mutex_lock(&uacce_mm->lock);
-	list_del(&q->list);
-	mutex_unlock(&uacce_mm->lock);
-
-	if (list_empty(&uacce_mm->queues)) {
-		if (uacce_mm->handle)
-			iommu_sva_unbind_device(uacce_mm->handle);
-		list_del(&uacce_mm->list);
-		kfree(uacce_mm);
-	}
+	if (!q->handle)
+		return;
+	iommu_sva_unbind_device(q->handle);
+	q->handle = NULL;
 }
 
 static int uacce_fops_open(struct inode *inode, struct file *filep)
 {
-	struct uacce_mm *uacce_mm = NULL;
 	struct uacce_device *uacce;
 	struct uacce_queue *q;
 	int ret = 0;
@@ -205,21 +135,16 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	if (!q)
 		return -ENOMEM;
 
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm = uacce_mm_get(uacce, q, current->mm);
-	mutex_unlock(&uacce->mm_lock);
-	if (!uacce_mm) {
-		ret = -ENOMEM;
+	ret = uacce_bind_queue(uacce, q);
+	if (ret)
 		goto out_with_mem;
-	}
 
 	q->uacce = uacce;
-	q->uacce_mm = uacce_mm;
 
 	if (uacce->ops->get_queue) {
-		ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+		ret = uacce->ops->get_queue(uacce, q->pasid, q);
 		if (ret < 0)
-			goto out_with_mm;
+			goto out_with_bond;
 	}
 
 	init_waitqueue_head(&q->wait);
@@ -227,12 +152,14 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	uacce->inode = inode;
 	q->state = UACCE_Q_INIT;
 
+	mutex_lock(&uacce->queues_lock);
+	list_add(&q->list, &uacce->queues);
+	mutex_unlock(&uacce->queues_lock);
+
 	return 0;
 
-out_with_mm:
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm_put(q);
-	mutex_unlock(&uacce->mm_lock);
+out_with_bond:
+	uacce_unbind_queue(q);
 out_with_mem:
 	kfree(q);
 	return ret;
@@ -241,14 +168,12 @@ out_with_mem:
 static int uacce_fops_release(struct inode *inode, struct file *filep)
 {
 	struct uacce_queue *q = filep->private_data;
-	struct uacce_device *uacce = q->uacce;
 
+	mutex_lock(&q->uacce->queues_lock);
+	list_del(&q->list);
+	mutex_unlock(&q->uacce->queues_lock);
 	uacce_put_queue(q);
-
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm_put(q);
-	mutex_unlock(&uacce->mm_lock);
-
+	uacce_unbind_queue(q);
 	kfree(q);
 
 	return 0;
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
 	if (ret < 0)
 		goto err_with_uacce;
 
-	INIT_LIST_HEAD(&uacce->mm_list);
-	mutex_init(&uacce->mm_lock);
+	INIT_LIST_HEAD(&uacce->queues);
+	mutex_init(&uacce->queues_lock);
 	device_initialize(&uacce->dev);
 	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
 	uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
  */
 void uacce_remove(struct uacce_device *uacce)
 {
-	struct uacce_mm *uacce_mm;
-	struct uacce_queue *q;
+	struct uacce_queue *q, *next_q;
 
 	if (!uacce)
 		return;
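The final hunk below collapses the old nested per-mm walk into one pass over uacce->queues using list_for_each_entry_safe(), whose lookahead pointer lets the loop body unlink or even free the current entry without derailing the iteration. A generic sketch of that iterator pattern; struct item and drain_items() are hypothetical:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;	/* linkage into the owner's list */
};

/* Tear down every entry: _safe caches the next node before the body
 * runs, so list_del() + kfree() on the current entry is fine. */
static void drain_items(struct list_head *head)
{
	struct item *it, *next;

	list_for_each_entry_safe(it, next, head, list) {
		list_del(&it->list);
		kfree(it);
	}
}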
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
 		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
 
 	/* ensure no open queue remains */
-	mutex_lock(&uacce->mm_lock);
-	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-		/*
-		 * We don't take the uacce_mm->lock here. Since we hold the
-		 * device's mm_lock, no queue can be added to or removed from
-		 * this uacce_mm. We may run concurrently with mm_exit, but
-		 * uacce_put_queue() is serialized and iommu_sva_unbind_device()
-		 * waits for the lock that mm_exit is holding.
-		 */
-		list_for_each_entry(q, &uacce_mm->queues, list)
-			uacce_put_queue(q);
-
-		if (uacce->flags & UACCE_DEV_SVA) {
-			iommu_sva_unbind_device(uacce_mm->handle);
-			uacce_mm->handle = NULL;
-		}
+	mutex_lock(&uacce->queues_lock);
+	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+		uacce_put_queue(q);
+		uacce_unbind_queue(q);
 	}
-	mutex_unlock(&uacce->mm_lock);
+	mutex_unlock(&uacce->queues_lock);
 
 	/* disable sva now since no opened queues */
 	if (uacce->flags & UACCE_DEV_SVA)