author	Jason Gunthorpe <jgg@nvidia.com>	2025-11-20 14:47:42 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2025-11-25 15:11:49 +0100
commit	152c862c172162d1bed85bfb9ecdf62fec9e86ae
tree	577d5e16034abb9aebff43ec2615e013ec006bad
parent	1e8b6eb1418ca2fcd10282409c0e18f73a51280f
iommupt: Fix unlikely flows in increase_top()
Since increase_top() does its own READ_ONCE() on top_of_table, the caller's prior READ_ONCE() could be inconsistent, and the first time through the loop we may actually already have the right level if two threads are racing to map. In this case new_level will be left uninitialized.

Further, all the exits from the loop have to either commit to the new top or free any memory allocated, so the early return must become a goto err_free.

Make it so the only break from the loop always sets new_level to the right value and all other exits go to err_free. Use pts.level (the pts represents the top we are stacking) within the loop instead of new_level.

Fixes: dcd6a011a8d5 ("iommupt: Add map_pages op")
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/r/aRwgNW9PiW2j-Qwo@stanley.mountain
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
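The control-flow pattern the patch enforces can be shown with a small standalone C sketch. This is only illustrative and is not the iommu_pt code: the names grow_top, node, free_list, MAX_LEVEL and level_covers are made up, and malloc()/free() stand in for the kernel's table allocator and deferred free list. It demonstrates the two points above: the loop's only successful break records new_level (including on the very first pass, where the uninitialized-variable bug lived), and every other exit funnels through a single err_free label so nothing allocated inside the loop leaks.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LEVEL 5

struct node {
	struct node *next;	/* free-list linkage for cleanup on error */
	unsigned int level;
};

/* Pretend range check: the table is tall enough once it reaches needed_level. */
static bool level_covers(unsigned int level, unsigned int needed_level)
{
	return level >= needed_level;
}

static int grow_top(unsigned int cur_level, unsigned int needed_level,
		    unsigned int *new_level)
{
	struct node *free_list = NULL;	/* everything allocated in the loop */
	unsigned int level = cur_level;
	int ret;

	while (true) {
		struct node *n;

		if (level_covers(level, needed_level)) {
			/*
			 * Only successful exit. Setting new_level here also
			 * covers the case where the very first check passes
			 * because another thread already grew the table.
			 */
			*new_level = level;
			break;
		}

		level++;
		if (level > MAX_LEVEL) {
			ret = -ERANGE;
			goto err_free;
		}

		n = malloc(sizeof(*n));
		if (!n) {
			/* Returning directly here would leak earlier nodes. */
			ret = -ENOMEM;
			goto err_free;
		}
		n->level = level;
		n->next = free_list;
		free_list = n;
	}

	/*
	 * Success: the real code publishes the new top under the domain lock
	 * here (and can still bail out with -EAGAIN if it lost the race).
	 * The sketch just reuses the cleanup below so it stays leak-free.
	 */
	ret = 0;

err_free:
	while (free_list) {
		struct node *n = free_list;

		free_list = n->next;
		free(n);
	}
	return ret;
}

int main(void)
{
	unsigned int new_level;

	if (grow_top(1, 3, &new_level) == 0)
		printf("grew top to level %u\n", new_level);
	return 0;
}

Calling grow_top(3, 3, &new_level), for instance, takes the first-iteration break with nothing allocated, which is the racing-map case the patch handles.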
-rw-r--r--	drivers/iommu/generic_pt/iommu_pt.h	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 0e046fe0eea3..032d04ec7b56 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -683,8 +683,11 @@ static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
 		top_range.va = range->va;
 		top_range.last_va = range->last_va;
 
-		if (!pt_check_range(&top_range) && map->leaf_level <= pts.level)
+		if (!pt_check_range(&top_range) &&
+		    map->leaf_level <= pts.level) {
+			new_level = pts.level;
 			break;
+		}
 
 		pts.level++;
 		if (pts.level > PT_MAX_TOP_LEVEL ||
@@ -693,17 +696,18 @@ static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
 			goto err_free;
 		}
 
-		new_level = pts.level;
 		table_mem =
 			table_alloc_top(common, _pt_top_set(NULL, pts.level),
					map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
-		if (IS_ERR(table_mem))
-			return PTR_ERR(table_mem);
+		if (IS_ERR(table_mem)) {
+			ret = PTR_ERR(table_mem);
+			goto err_free;
+		}
 		iommu_pages_list_add(&free_list, table_mem);
 
 		/* The new table links to the lower table always at index 0 */
 		top_range.va = 0;
-		top_range.top_level = new_level;
+		top_range.top_level = pts.level;
 		pts.table_lower = pts.table;
 		pts.table = table_mem;
 		pt_load_single_entry(&pts);
@@ -735,7 +739,8 @@ static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
 	 */
 	domain_lock = iommu_table->driver_ops->get_top_lock(iommu_table);
 	spin_lock_irqsave(domain_lock, flags);
-	if (common->top_of_table != top_of_table) {
+	if (common->top_of_table != top_of_table ||
+	    top_of_table == new_top_of_table) {
 		spin_unlock_irqrestore(domain_lock, flags);
 		ret = -EAGAIN;
 		goto err_free;