author     Lu Baolu <baolu.lu@linux.intel.com>    2025-05-13 11:07:37 +0800
committer  Joerg Roedel <jroedel@suse.de>         2025-05-16 08:49:28 +0200
commit     868720fe15d1a6a1bddc28ad7b7e4b62448ee36b (patch)
tree       9018dc0134a68ff60b67e43c7e75aa8fed08e9cf /drivers/iommu
parent     f93b4ac5929a5754fc4edba26b43d0c9fa57d576 (diff)
iommu/vt-d: Replace spin_lock with mutex to protect domain ida
The domain ID allocator is currently protected by a spin_lock. However,
ida_alloc_range can potentially block if it needs to allocate memory to
grow its internal structures.
Replace the spin_lock with a mutex, which allows the holder to sleep. Thus,
the memory allocation flags can be updated from GFP_ATOMIC to GFP_KERNEL
to allow blocking memory allocations if necessary.
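
For reference, the rule being applied is that the allocation context must
match the lock type: a spin_lock holder runs in atomic context and is limited
to GFP_ATOMIC, while a mutex holder may sleep and can therefore pass
GFP_KERNEL, letting the IDA grow its internal nodes under memory pressure
instead of failing. A minimal sketch of the resulting pattern (example_ida,
example_mutex and the ID range are illustrative names, not the driver's own):

	/* Illustrative only; not the VT-d code itself. */
	#include <linux/idr.h>
	#include <linux/mutex.h>

	static DEFINE_IDA(example_ida);
	static DEFINE_MUTEX(example_mutex);

	static int example_alloc_id(unsigned int max_id)
	{
		int id;

		mutex_lock(&example_mutex);
		/* Sleeping is allowed here, so GFP_KERNEL is safe. */
		id = ida_alloc_range(&example_ida, 1, max_id, GFP_KERNEL);
		mutex_unlock(&example_mutex);

		return id; /* allocated ID, or -ENOMEM/-ENOSPC on failure */
	}
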
Introduce a new mutex, did_lock, specifically for protecting the domain
ida. The existing spinlock will remain for protecting other intel_iommu
fields.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20250430021135.2370244-3-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/intel/dmar.c  |  1
-rw-r--r--  drivers/iommu/intel/iommu.c | 12
-rw-r--r--  drivers/iommu/intel/iommu.h |  2
3 files changed, 7 insertions, 8 deletions
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 0e35969c026b..9e17e8e56308 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1101,6 +1101,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->node = NUMA_NO_NODE;
 	spin_lock_init(&iommu->lock);
 	ida_init(&iommu->domain_ida);
+	mutex_init(&iommu->did_lock);
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
 	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 023105234139..0f2845147058 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1345,17 +1345,16 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	if (!info)
 		return -ENOMEM;
 
-	spin_lock(&iommu->lock);
+	guard(mutex)(&iommu->did_lock);
 	curr = xa_load(&domain->iommu_array, iommu->seq_id);
 	if (curr) {
 		curr->refcnt++;
-		spin_unlock(&iommu->lock);
 		kfree(info);
 		return 0;
 	}
 
 	num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
-			      cap_ndoms(iommu->cap) - 1, GFP_ATOMIC);
+			      cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
 	if (num < 0) {
 		pr_err("%s: No free domain ids\n", iommu->name);
 		goto err_unlock;
@@ -1365,19 +1364,17 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	info->did	= num;
 	info->iommu	= iommu;
 	curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
-			  NULL, info, GFP_ATOMIC);
+			  NULL, info, GFP_KERNEL);
 	if (curr) {
 		ret = xa_err(curr) ? : -EBUSY;
 		goto err_clear;
 	}
-	spin_unlock(&iommu->lock);
 
 	return 0;
 
 err_clear:
 	ida_free(&iommu->domain_ida, info->did);
 err_unlock:
-	spin_unlock(&iommu->lock);
 	kfree(info);
 	return ret;
 }
@@ -1389,7 +1386,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	if (domain->domain.type == IOMMU_DOMAIN_SVA)
 		return;
 
-	spin_lock(&iommu->lock);
+	guard(mutex)(&iommu->did_lock);
 	info = xa_load(&domain->iommu_array, iommu->seq_id);
 	if (--info->refcnt == 0) {
 		ida_free(&iommu->domain_ida, info->did);
@@ -1397,7 +1394,6 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 		domain->nid = NUMA_NO_NODE;
 		kfree(info);
 	}
-	spin_unlock(&iommu->lock);
 }
 
 static void domain_exit(struct dmar_domain *domain)
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 25faf3aadd24..5f140892fae0 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -722,6 +722,8 @@ struct intel_iommu {
 	unsigned char	name[16];	/* Device Name */
 
 #ifdef CONFIG_INTEL_IOMMU
+	/* mutex to protect domain_ida */
+	struct mutex	did_lock;
 	struct ida	domain_ida;	/* domain id allocator */
 	unsigned long	*copied_tables;	/* bitmap of copied tables */
 	spinlock_t	lock;		/* protect context, domain ids */
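
The hunks above also drop every explicit unlock in domain_attach_iommu() and
domain_detach_iommu(). That falls out of guard(mutex) from the kernel's
scope-based cleanup helpers (linux/cleanup.h): the lock taken by
guard(mutex)(&iommu->did_lock) is released automatically when the enclosing
scope exits, covering both the success and error returns. A minimal sketch of
the construct, using made-up names:

	/* Illustrative only; example_lock and example_value are made up. */
	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);
	static int example_value;

	static int example_read_locked(void)
	{
		guard(mutex)(&example_lock);	/* locks now, unlocks at scope exit */

		if (example_value < 0)
			return -EINVAL;		/* unlocked automatically here */

		return example_value;		/* ...and here */
	}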