path: root/lib/iommu-common.c
author     Linus Torvalds <torvalds@linux-foundation.org>    2015-04-18 18:01:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2015-04-18 18:01:29 -0400
commit     64fb1d0e975e92e012802d371e417266d6531676 (patch)
tree       eb3fba9d0fddc47e29699e689bb45e79a22d1116 /lib/iommu-common.c
parent     dba94f2155f581395ef9608418778e3491b3d470 (diff)
parent     ccb301862aa51ea7c10c10b440f3e8bbeac5b720 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc fixes from David Miller:
 "Unfortunately, I brown paper bagged the generic iommu pool allocator
  by applying the wrong revision of the patch series.

  This reverts the bad one, and puts the right one in"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  iommu-common: Fix PARISC compile-time warnings
  sparc: Make LDC use common iommu poll management functions
  sparc: Make sparc64 use scalable lib/iommu-common.c functions
  Break up monolithic iommu table/lock into finer granularity pools and lock
  sparc: Revert generic IOMMU allocator.
Diffstat (limited to 'lib/iommu-common.c')
-rw-r--r--    lib/iommu-common.c    190
1 file changed, 118 insertions(+), 72 deletions(-)
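
For orientation before the patch itself, below is a minimal, hypothetical caller sketch of the post-merge API, put together only from the function signatures and struct fields visible in the hunks that follow (table_map_base, table_shift, map). The helper names (my_lazy_flush, my_iommu_setup, my_map, my_unmap), the use of PAGE_SHIFT as the table shift, and the bitmap allocation details are illustrative assumptions, not code from this merge; the real conversions live in the sparc64 and LDC patches listed above.

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/iommu-common.h>

#ifndef DMA_ERROR_CODE
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)    /* same fallback the allocator uses */
#endif

/* Hypothetical hardware flush hook, wired in through the new lazy_flush argument. */
static void my_lazy_flush(struct iommu_map_table *tbl)
{
        /* a real driver would flush its IOTLB/streaming cache here */
}

/* Set up one map table: large pool enabled, default pool count, boundary checks kept. */
static int my_iommu_setup(struct iommu_map_table *tbl, unsigned long num_entries,
                          unsigned long dvma_base)
{
        tbl->table_map_base = dvma_base;
        tbl->map = kcalloc(BITS_TO_LONGS(num_entries), sizeof(unsigned long),
                           GFP_KERNEL);
        if (!tbl->map)
                return -ENOMEM;
        iommu_tbl_pool_init(tbl, num_entries, PAGE_SHIFT /* assumed shift */,
                            my_lazy_flush, true /* large_pool */,
                            0 /* npools: use the default */,
                            false /* keep span-boundary checks */);
        return 0;
}

/* Map npages: the allocator hands back an entry index, or DMA_ERROR_CODE on failure. */
static u64 my_map(struct device *dev, struct iommu_map_table *tbl,
                  unsigned long npages)
{
        unsigned long entry = iommu_tbl_range_alloc(dev, tbl, npages,
                                                    NULL /* no SG handle */,
                                                    ~0UL /* no address mask */,
                                                    0 /* no extra alignment */);

        if (entry == DMA_ERROR_CODE)
                return DMA_ERROR_CODE;
        /* the caller programs its hardware page table for these entries here */
        return tbl->table_map_base + (entry << tbl->table_shift);
}

/* Unmap: passing DMA_ERROR_CODE as 'entry' asks for the default addr->entry mapping. */
static void my_unmap(struct iommu_map_table *tbl, u64 dma_addr,
                     unsigned long npages)
{
        iommu_tbl_range_free(tbl, dma_addr, npages, DMA_ERROR_CODE);
}
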
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index fac4f35250c9..a1a517cba7ec 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -9,37 +9,72 @@
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
+#include <linux/hash.h>
#ifndef DMA_ERROR_CODE
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif
-#define IOMMU_LARGE_ALLOC 15
+unsigned long iommu_large_alloc = 15;
+
+static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
+
+static inline bool need_flush(struct iommu_map_table *iommu)
+{
+ return (iommu->lazy_flush != NULL &&
+ (iommu->flags & IOMMU_NEED_FLUSH) != 0);
+}
+
+static inline void set_flush(struct iommu_map_table *iommu)
+{
+ iommu->flags |= IOMMU_NEED_FLUSH;
+}
+
+static inline void clear_flush(struct iommu_map_table *iommu)
+{
+ iommu->flags &= ~IOMMU_NEED_FLUSH;
+}
+
+static void setup_iommu_pool_hash(void)
+{
+ unsigned int i;
+ static bool do_once;
+
+ if (do_once)
+ return;
+ do_once = true;
+ for_each_possible_cpu(i)
+ per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+}
/*
- * Initialize iommu_pool entries for the iommu_table. `num_entries'
+ * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
* is the number of table entries. If `large_pool' is set to true,
* the top 1/4 of the table will be set aside for pool allocations
- * of more than IOMMU_LARGE_ALLOC pages.
+ * of more than iommu_large_alloc pages.
*/
-extern void iommu_tbl_pool_init(struct iommu_table *iommu,
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
unsigned long num_entries,
- u32 page_table_shift,
- const struct iommu_tbl_ops *iommu_tbl_ops,
- bool large_pool, u32 npools)
+ u32 table_shift,
+ void (*lazy_flush)(struct iommu_map_table *),
+ bool large_pool, u32 npools,
+ bool skip_span_boundary_check)
{
unsigned int start, i;
struct iommu_pool *p = &(iommu->large_pool);
+ setup_iommu_pool_hash();
if (npools == 0)
iommu->nr_pools = IOMMU_NR_POOLS;
else
iommu->nr_pools = npools;
BUG_ON(npools > IOMMU_NR_POOLS);
- iommu->page_table_shift = page_table_shift;
- iommu->iommu_tbl_ops = iommu_tbl_ops;
+ iommu->table_shift = table_shift;
+ iommu->lazy_flush = lazy_flush;
start = 0;
+ if (skip_span_boundary_check)
+ iommu->flags |= IOMMU_NO_SPAN_BOUND;
if (large_pool)
iommu->flags |= IOMMU_HAS_LARGE_POOL;
@@ -48,11 +83,11 @@ extern void iommu_tbl_pool_init(struct iommu_table *iommu,
else
iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
for (i = 0; i < iommu->nr_pools; i++) {
- spin_lock_init(&(iommu->arena_pool[i].lock));
- iommu->arena_pool[i].start = start;
- iommu->arena_pool[i].hint = start;
+ spin_lock_init(&(iommu->pools[i].lock));
+ iommu->pools[i].start = start;
+ iommu->pools[i].hint = start;
start += iommu->poolsize; /* start for next pool */
- iommu->arena_pool[i].end = start - 1;
+ iommu->pools[i].end = start - 1;
}
if (!large_pool)
return;
@@ -65,121 +100,136 @@ extern void iommu_tbl_pool_init(struct iommu_table *iommu,
EXPORT_SYMBOL(iommu_tbl_pool_init);
unsigned long iommu_tbl_range_alloc(struct device *dev,
- struct iommu_table *iommu,
+ struct iommu_map_table *iommu,
unsigned long npages,
unsigned long *handle,
- unsigned int pool_hash)
+ unsigned long mask,
+ unsigned int align_order)
{
+ unsigned int pool_hash = __this_cpu_read(iommu_pool_hash);
unsigned long n, end, start, limit, boundary_size;
- struct iommu_pool *arena;
+ struct iommu_pool *pool;
int pass = 0;
unsigned int pool_nr;
unsigned int npools = iommu->nr_pools;
unsigned long flags;
bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
- bool largealloc = (large_pool && npages > IOMMU_LARGE_ALLOC);
+ bool largealloc = (large_pool && npages > iommu_large_alloc);
unsigned long shift;
+ unsigned long align_mask = 0;
+
+ if (align_order > 0)
+ align_mask = 0xffffffffffffffffl >> (64 - align_order);
/* Sanity check */
if (unlikely(npages == 0)) {
- printk_ratelimited("npages == 0\n");
+ WARN_ON_ONCE(1);
return DMA_ERROR_CODE;
}
if (largealloc) {
- arena = &(iommu->large_pool);
- spin_lock_irqsave(&arena->lock, flags);
+ pool = &(iommu->large_pool);
pool_nr = 0; /* to keep compiler happy */
} else {
/* pick out pool_nr */
pool_nr = pool_hash & (npools - 1);
- arena = &(iommu->arena_pool[pool_nr]);
-
- /* find first available unlocked pool */
- while (!spin_trylock_irqsave(&(arena->lock), flags)) {
- pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
- arena = &(iommu->arena_pool[pool_nr]);
- }
+ pool = &(iommu->pools[pool_nr]);
}
+ spin_lock_irqsave(&pool->lock, flags);
again:
if (pass == 0 && handle && *handle &&
- (*handle >= arena->start) && (*handle < arena->end))
+ (*handle >= pool->start) && (*handle < pool->end))
start = *handle;
else
- start = arena->hint;
+ start = pool->hint;
- limit = arena->end;
+ limit = pool->end;
/* The case below can happen if we have a small segment appended
* to a large, or when the previous alloc was at the very end of
- * the available space. If so, go back to the beginning and flush.
+ * the available space. If so, go back to the beginning. If a
+ * flush is needed, it will get done based on the return value
+ * from iommu_area_alloc() below.
*/
- if (start >= limit) {
- start = arena->start;
- if (iommu->iommu_tbl_ops->reset != NULL)
- iommu->iommu_tbl_ops->reset(iommu);
+ if (start >= limit)
+ start = pool->start;
+ shift = iommu->table_map_base >> iommu->table_shift;
+ if (limit + shift > mask) {
+ limit = mask - shift + 1;
+ /* If we're constrained on address range, first try
+ * at the masked hint to avoid O(n) search complexity,
+ * but on second pass, start at 0 in pool 0.
+ */
+ if ((start & mask) >= limit || pass > 0) {
+ spin_unlock(&(pool->lock));
+ pool = &(iommu->pools[0]);
+ spin_lock(&(pool->lock));
+ start = pool->start;
+ } else {
+ start &= mask;
+ }
}
if (dev)
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << iommu->page_table_shift);
+ 1 << iommu->table_shift);
else
- boundary_size = ALIGN(1ULL << 32, 1 << iommu->page_table_shift);
+ boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
- shift = iommu->page_table_map_base >> iommu->page_table_shift;
- boundary_size = boundary_size >> iommu->page_table_shift;
+ boundary_size = boundary_size >> iommu->table_shift;
/*
- * if the iommu has a non-trivial cookie <-> index mapping, we set
+ * if the skip_span_boundary_check had been set during init, we set
* things up so that iommu_is_span_boundary() merely checks if the
* (index + npages) < num_tsb_entries
*/
- if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
+ if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
shift = 0;
boundary_size = iommu->poolsize * iommu->nr_pools;
}
n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
- boundary_size, 0);
+ boundary_size, align_mask);
if (n == -1) {
if (likely(pass == 0)) {
/* First failure, rescan from the beginning. */
- arena->hint = arena->start;
- if (iommu->iommu_tbl_ops->reset != NULL)
- iommu->iommu_tbl_ops->reset(iommu);
+ pool->hint = pool->start;
+ set_flush(iommu);
pass++;
goto again;
} else if (!largealloc && pass <= iommu->nr_pools) {
- spin_unlock(&(arena->lock));
+ spin_unlock(&(pool->lock));
pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
- arena = &(iommu->arena_pool[pool_nr]);
- while (!spin_trylock(&(arena->lock))) {
- pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
- arena = &(iommu->arena_pool[pool_nr]);
- }
- arena->hint = arena->start;
+ pool = &(iommu->pools[pool_nr]);
+ spin_lock(&(pool->lock));
+ pool->hint = pool->start;
+ set_flush(iommu);
pass++;
goto again;
} else {
/* give up */
- spin_unlock_irqrestore(&(arena->lock), flags);
- return DMA_ERROR_CODE;
+ n = DMA_ERROR_CODE;
+ goto bail;
}
}
+ if (n < pool->hint || need_flush(iommu)) {
+ clear_flush(iommu);
+ iommu->lazy_flush(iommu);
+ }
end = n + npages;
-
- arena->hint = end;
+ pool->hint = end;
/* Update handle for SG allocations */
if (handle)
*handle = end;
- spin_unlock_irqrestore(&(arena->lock), flags);
+bail:
+ spin_unlock_irqrestore(&(pool->lock), flags);
return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
-static struct iommu_pool *get_pool(struct iommu_table *tbl,
+static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
unsigned long entry)
{
struct iommu_pool *p;
@@ -193,31 +243,27 @@ static struct iommu_pool *get_pool(struct iommu_table *tbl,
unsigned int pool_nr = entry / tbl->poolsize;
BUG_ON(pool_nr >= tbl->nr_pools);
- p = &tbl->arena_pool[pool_nr];
+ p = &tbl->pools[pool_nr];
}
return p;
}
-void iommu_tbl_range_free(struct iommu_table *iommu, u64 dma_addr,
- unsigned long npages, bool do_demap, void *demap_arg)
+/* Caller supplies the index of the entry into the iommu map table
+ * itself when the mapping from dma_addr to the entry is not the
+ * default addr->entry mapping below.
+ */
+void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
+ unsigned long npages, unsigned long entry)
{
- unsigned long entry;
struct iommu_pool *pool;
unsigned long flags;
- unsigned long shift = iommu->page_table_shift;
+ unsigned long shift = iommu->table_shift;
- if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
- entry = (*iommu->iommu_tbl_ops->cookie_to_index)(dma_addr,
- demap_arg);
- } else {
- entry = (dma_addr - iommu->page_table_map_base) >> shift;
- }
+ if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
+ entry = (dma_addr - iommu->table_map_base) >> shift;
pool = get_pool(iommu, entry);
spin_lock_irqsave(&(pool->lock), flags);
- if (do_demap && iommu->iommu_tbl_ops->demap != NULL)
- (*iommu->iommu_tbl_ops->demap)(demap_arg, entry, npages);
-
bitmap_clear(iommu->map, entry, npages);
spin_unlock_irqrestore(&(pool->lock), flags);
}
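
The core of the change is visible in the alloc path above: the single arena lock becomes an array of per-pool spinlocks, each CPU is steered to a starting pool by a precomputed hash (setup_iommu_pool_hash(), then pool_nr = pool_hash & (npools - 1)), and hardware flushes are deferred until need_flush() or a hint wrap makes them necessary. The fragment below is a standalone userspace toy model of just the pool steering; it assumes IOMMU_POOL_HASHBITS is 4 (its value comes from linux/iommu-common.h, which is not part of this diff) and uses a stand-in multiplier rather than the kernel's hash_32() constant.

#include <stdio.h>
#include <stdint.h>

#define IOMMU_POOL_HASHBITS 4                   /* assumed; defined in linux/iommu-common.h */
#define NR_POOLS (1u << IOMMU_POOL_HASHBITS)

/* Multiplicative hash in the spirit of the kernel's hash_32(); the constant is a stand-in. */
static uint32_t toy_hash_32(uint32_t val, unsigned int bits)
{
        return (val * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
        /* Each CPU caches its hash once at init, like the per-CPU iommu_pool_hash above. */
        for (uint32_t cpu = 0; cpu < 8; cpu++) {
                uint32_t pool_hash = toy_hash_32(cpu, IOMMU_POOL_HASHBITS);
                printf("cpu%2u starts in pool %u of %u\n",
                       cpu, pool_hash & (NR_POOLS - 1), NR_POOLS);
        }
        return 0;
}

When an allocation fails in its starting pool, the kernel code still walks the remaining pools (dropping one pool lock and taking the next with interrupts left disabled), so per-pool locking reduces contention in the common case without losing the ability to fall back to the rest of the table.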