author    Will Deacon <will@kernel.org>    2020-12-08 15:04:49 +0000
committer Will Deacon <will@kernel.org>    2020-12-08 15:04:49 +0000
commit    854623fdea9dc3ae8543a4d5d8448ebbaee61acc (patch)
tree      960f6709bfdf98ee1e871205395cd741f7433925 /drivers/iommu
parent    1ab2bf5831586820a1dbe4425daf1c86a482129d (diff)
parent    f37eb48466d2ef4de33207f7389716d1734d9710 (diff)
Merge branch 'for-next/iommu/misc' into for-next/iommu/core
Miscellaneous IOMMU changes for 5.11. Largely cosmetic, apart from a
change to the way in which identity-mapped domains are configured so
that the requests are now batched and can potentially use larger pages
for the mapping.

* for-next/iommu/misc:
  iommu/io-pgtable-arm: Remove unused 'level' parameter from iopte_type() macro
  iommu: Defer the early return in arm_(v7s/lpae)_map
  iommu: Improve the performance for direct_mapping
  iommu: return error code when it can't get group
  iommu: Modify the description of iommu_sva_unbind_device
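For reference, the batched identity-mapping pattern introduced by the
direct_mapping change looks roughly like the self-contained sketch below.
The map_range() and is_already_mapped() helpers are hypothetical stand-ins
for iommu_map() and iommu_iova_to_phys(); this illustrates the batching
loop only, it is not the kernel code itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE 4096ULL

/* Hypothetical stand-in for iommu_iova_to_phys(): reports whether the
 * page at 'addr' already has a translation installed. */
static bool is_already_mapped(uint64_t addr)
{
	return addr >= 0x8000 && addr < 0x9000;	/* pretend one page is mapped */
}

/* Hypothetical stand-in for iommu_map(): identity-maps one contiguous run. */
static int map_range(uint64_t start, uint64_t size)
{
	printf("map [%#llx, %#llx) in one call (%llu pages)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size),
	       (unsigned long long)(size / PG_SIZE));
	return 0;
}

/*
 * Batched direct mapping: accumulate contiguous unmapped pages and issue a
 * single map_range() call per run, mirroring the loop added to
 * iommu_create_device_direct_mappings(). 'end' is the exclusive upper bound.
 */
static int direct_map(uint64_t start, uint64_t end)
{
	uint64_t addr, map_size = 0;

	for (addr = start; addr <= end; addr += PG_SIZE) {
		if (addr < end && !is_already_mapped(addr)) {
			map_size += PG_SIZE;	/* extend the current run */
			continue;
		}

		/* Hit an already-mapped page or the end: flush the run. */
		if (map_size) {
			int ret = map_range(addr - map_size, map_size);

			if (ret)
				return ret;
			map_size = 0;
		}
	}

	return 0;
}

int main(void)
{
	/* Region [0x4000, 0x10000) with one page already mapped at 0x8000. */
	return direct_map(0x4000, 0x10000);
}

Because each run is handed to the mapping layer as a single request, the
io-pgtable code can pick larger (block-sized) entries where the run is big
and suitably aligned, which is where the performance win comes from.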
Diffstat (limited to 'drivers/iommu')
-rw-r--r--   drivers/iommu/io-pgtable-arm-v7s.c    8
-rw-r--r--   drivers/iommu/io-pgtable-arm.c       18
-rw-r--r--   drivers/iommu/iommu.c                28
3 files changed, 33 insertions(+), 21 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index a688f22cbe3b..359b96b0fa3e 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -522,14 +522,14 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
struct io_pgtable *iop = &data->iop;
int ret;
- /* If no access, then nothing to do */
- if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
-
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
+ /* If no access, then nothing to do */
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7c9ea9d7874a..135f57b37bbd 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -130,7 +130,7 @@
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
-#define iopte_type(pte,l) \
+#define iopte_type(pte) \
(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
@@ -151,9 +151,9 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
enum io_pgtable_fmt fmt)
{
if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
- return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;
+ return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
- return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
+ return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
@@ -280,7 +280,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
/* We require an unmap first */
WARN_ON(!selftest_running);
return -EEXIST;
- } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+ } else if (iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
* We need to unmap and free the old table before
* overwriting it with a block entry.
@@ -444,10 +444,6 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
arm_lpae_iopte prot;
long iaext = (s64)iova >> cfg->ias;
- /* If no access, then nothing to do */
- if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
-
if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
return -EINVAL;
@@ -456,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iaext || paddr >> cfg->oas))
return -ERANGE;
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
prot = arm_lpae_prot_to_pte(data, iommu_prot);
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
/*
@@ -548,7 +548,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
* block, but anything else is invalid. We can't misinterpret
* a page entry here since we're never at the last level.
*/
- if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
+ if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
return 0;
tablep = iopte_deref(pte, data);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index cc06dcadff8f..a606b7aef96b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -255,8 +255,10 @@ int iommu_probe_device(struct device *dev)
goto err_out;
group = iommu_group_get(dev);
- if (!group)
+ if (!group) {
+ ret = -ENODEV;
goto err_release;
+ }
/*
* Try to allocate a default domain - needs support from the
@@ -742,6 +744,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
/* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;
+ size_t map_size = 0;
if (domain->ops->apply_resv_region)
domain->ops->apply_resv_region(dev, domain, entry);
@@ -753,16 +756,27 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
continue;
- for (addr = start; addr < end; addr += pg_size) {
+ for (addr = start; addr <= end; addr += pg_size) {
phys_addr_t phys_addr;
+ if (addr == end)
+ goto map_end;
+
phys_addr = iommu_iova_to_phys(domain, addr);
- if (phys_addr)
+ if (!phys_addr) {
+ map_size += pg_size;
continue;
+ }
- ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
- if (ret)
- goto out;
+map_end:
+ if (map_size) {
+ ret = iommu_map(domain, addr - map_size,
+ addr - map_size, map_size,
+ entry->prot);
+ if (ret)
+ goto out;
+ map_size = 0;
+ }
}
}
@@ -2998,8 +3012,6 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
* Put reference to a bond between device and address space. The device should
* not be issuing any more transaction for this PASID. All outstanding page
* requests for this PASID must have been flushed to the IOMMU.
- *
- * Returns 0 on success, or an error value
*/
void iommu_sva_unbind_device(struct iommu_sva *handle)
{