Diffstat (limited to 'drivers/iommu/iova.c')
| -rw-r--r-- | drivers/iommu/iova.c | 714 |
1 file changed, 339 insertions, 375 deletions
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..18f839721813 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -1,42 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright © 2006-2009, Intel Corporation.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */

 #include <linux/iova.h>
+#include <linux/kmemleak.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/bitops.h>
 #include <linux/cpu.h>
+#include <linux/workqueue.h>

 /* The anchor node sits above the top of the usable address space */
 #define IOVA_ANCHOR	~0UL

+#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
+
 static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
 static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
-static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
-static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(struct timer_list *t);
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+static void free_global_cached_iovas(struct iova_domain *iovad);
+
+static struct iova *to_iova(struct rb_node *node)
+{
+	return rb_entry(node, struct iova, node);
+}

 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -57,64 +53,12 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
 	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
-	iovad->flush_cb = NULL;
-	iovad->fq = NULL;
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
-	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);

-static void free_iova_flush_queue(struct iova_domain *iovad)
-{
-	if (!iovad->fq)
-		return;
-
-	if (timer_pending(&iovad->fq_timer))
-		del_timer(&iovad->fq_timer);
-
-	fq_destroy_all_entries(iovad);
-
-	free_percpu(iovad->fq);
-
-	iovad->fq = NULL;
-	iovad->flush_cb = NULL;
-	iovad->entry_dtor = NULL;
-}
-
-int init_iova_flush_queue(struct iova_domain *iovad,
-			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
-{
-	int cpu;
-
-	atomic64_set(&iovad->fq_flush_start_cnt, 0);
-	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
-
-	iovad->fq = alloc_percpu(struct iova_fq);
-	if (!iovad->fq)
-		return -ENOMEM;
-
-	iovad->flush_cb = flush_cb;
-	iovad->entry_dtor = entry_dtor;
-
-	for_each_possible_cpu(cpu) {
-		struct iova_fq *fq;
-
-		fq = per_cpu_ptr(iovad->fq, cpu);
-		fq->head = 0;
-		fq->tail = 0;
-
-		spin_lock_init(&fq->lock);
-	}
-
-	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
-	atomic_set(&iovad->fq_timer_on, 0);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(init_iova_flush_queue);
-
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
 {
@@ -138,18 +82,57 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 {
 	struct iova *cached_iova;

-	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
-	if (free->pfn_hi < iovad->dma_32bit_pfn &&
-	    free->pfn_lo >= cached_iova->pfn_lo) {
+	cached_iova = to_iova(iovad->cached32_node);
+	if (free == cached_iova ||
+	    (free->pfn_hi < iovad->dma_32bit_pfn &&
+	     free->pfn_lo >= cached_iova->pfn_lo))
 		iovad->cached32_node = rb_next(&free->node);
+
+	if (free->pfn_lo < iovad->dma_32bit_pfn)
 		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
-	}

-	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+	cached_iova = to_iova(iovad->cached_node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
 		iovad->cached_node = rb_next(&free->node);
 }

+static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
+{
+	struct rb_node *node, *next;
+	/*
+	 * Ideally what we'd like to judge here is whether limit_pfn is close
+	 * enough to the highest-allocated IOVA that starting the allocation
+	 * walk from the anchor node will be quicker than this initial work to
+	 * find an exact starting point (especially if that ends up being the
+	 * anchor node anyway). This is an incredibly crude approximation which
+	 * only really helps the most likely case, but is at least trivially easy.
+	 */
+	if (limit_pfn > iovad->dma_32bit_pfn)
+		return &iovad->anchor.node;
+
+	node = iovad->rbroot.rb_node;
+	while (to_iova(node)->pfn_hi < limit_pfn)
+		node = node->rb_right;
+
+search_left:
+	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
+		node = node->rb_left;
+
+	if (!node->rb_left)
+		return node;
+
+	next = node->rb_left;
+	while (next->rb_right) {
+		next = next->rb_right;
+		if (to_iova(next)->pfn_lo >= limit_pfn) {
+			node = next;
+			goto search_left;
+		}
+	}
+
+	return node;
+}
+
 /* Insert the iova into domain rbtree by holding writer lock */
 static void
 iova_insert_rbtree(struct rb_root *root, struct iova *iova,
@@ -160,7 +143,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
 	new = (start) ? &start : &(root->rb_node);
 	/* Figure out where to put new node */
 	while (*new) {
-		struct iova *this = rb_entry(*new, struct iova, node);
+		struct iova *this = to_iova(*new);

 		parent = *new;

@@ -185,8 +168,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	struct rb_node *curr, *prev;
 	struct iova *curr_iova;
 	unsigned long flags;
-	unsigned long new_pfn;
+	unsigned long new_pfn, retry_pfn;
 	unsigned long align_mask = ~0UL;
+	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

 	if (size_aligned)
 		align_mask <<= fls_long(size - 1);
@@ -198,17 +182,29 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		goto iova32_full;

 	curr = __get_cached_rbnode(iovad, limit_pfn);
-	curr_iova = rb_entry(curr, struct iova, node);
+	curr_iova = to_iova(curr);
+	retry_pfn = curr_iova->pfn_hi;
+
+retry:
 	do {
-		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-		new_pfn = (limit_pfn - size) & align_mask;
+		high_pfn = min(high_pfn, curr_iova->pfn_lo);
+		new_pfn = (high_pfn - size) & align_mask;
 		prev = curr;
 		curr = rb_prev(curr);
-		curr_iova = rb_entry(curr, struct iova, node);
-	} while (curr && new_pfn <= curr_iova->pfn_hi);
-
-	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+		curr_iova = to_iova(curr);
+	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
+
+	if (high_pfn < size || new_pfn < low_pfn) {
+		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
+			high_pfn = limit_pfn;
+			low_pfn = retry_pfn + 1;
+			curr = iova_find_limit(iovad, limit_pfn);
+			curr_iova = to_iova(curr);
+			goto retry;
+		}
+		iovad->max32_alloc_size = size;
 		goto iova32_full;
+	}

 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -222,7 +218,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;

 iova32_full:
-	iovad->max32_alloc_size = size;
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return -ENOMEM;
 }
@@ -231,53 +226,16 @@ static struct kmem_cache *iova_cache;
 static unsigned int iova_cache_users;
 static DEFINE_MUTEX(iova_cache_mutex);

-struct iova *alloc_iova_mem(void)
+static struct iova *alloc_iova_mem(void)
 {
-	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
 }
-EXPORT_SYMBOL(alloc_iova_mem);

-void free_iova_mem(struct iova *iova)
+static void free_iova_mem(struct iova *iova)
 {
 	if (iova->pfn_lo != IOVA_ANCHOR)
 		kmem_cache_free(iova_cache, iova);
 }
-EXPORT_SYMBOL(free_iova_mem);
-
-int iova_cache_get(void)
-{
-	mutex_lock(&iova_cache_mutex);
-	if (!iova_cache_users) {
-		iova_cache = kmem_cache_create(
-			"iommu_iova", sizeof(struct iova), 0,
-			SLAB_HWCACHE_ALIGN, NULL);
-		if (!iova_cache) {
-			mutex_unlock(&iova_cache_mutex);
-			printk(KERN_ERR "Couldn't create iova cache\n");
-			return -ENOMEM;
-		}
-	}
-
-	iova_cache_users++;
-	mutex_unlock(&iova_cache_mutex);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(iova_cache_get);
-
-void iova_cache_put(void)
-{
-	mutex_lock(&iova_cache_mutex);
-	if (WARN_ON(!iova_cache_users)) {
-		mutex_unlock(&iova_cache_mutex);
-		return;
-	}
-	iova_cache_users--;
-	if (!iova_cache_users)
-		kmem_cache_destroy(iova_cache);
-	mutex_unlock(&iova_cache_mutex);
-}
-EXPORT_SYMBOL_GPL(iova_cache_put);

 /**
  * alloc_iova - allocates an iova
@@ -322,7 +280,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
 	assert_spin_locked(&iovad->iova_rbtree_lock);

 	while (node) {
-		struct iova *iova = rb_entry(node, struct iova, node);
+		struct iova *iova = to_iova(node);

 		if (pfn < iova->pfn_lo)
 			node = node->rb_left;
@@ -335,12 +293,11 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
 	return NULL;
 }

-static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
+static void remove_iova(struct iova_domain *iovad, struct iova *iova)
 {
 	assert_spin_locked(&iovad->iova_rbtree_lock);
 	__cached_rbnode_delete_update(iovad, iova);
 	rb_erase(&iova->node, &iovad->rbroot);
-	free_iova_mem(iova);
 }

 /**
@@ -348,7 +305,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
  * @iovad: - iova domain in question.
  * @pfn: - page frame number
  * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
  */
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
 {
@@ -375,8 +332,9 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
 	unsigned long flags;

 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	private_free_iova(iovad, iova);
+	remove_iova(iovad, iova);
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	free_iova_mem(iova);
 }
 EXPORT_SYMBOL_GPL(__free_iova);

@@ -390,11 +348,18 @@ EXPORT_SYMBOL_GPL(__free_iova);
 void
 free_iova(struct iova_domain *iovad, unsigned long pfn)
 {
-	struct iova *iova = find_iova(iovad, pfn);
-
-	if (iova)
-		__free_iova(iovad, iova);
+	unsigned long flags;
+	struct iova *iova;
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	iova = private_find_iova(iovad, pfn);
+	if (!iova) {
+		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+		return;
+	}
+	remove_iova(iovad, iova);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	free_iova_mem(iova);
 }
 EXPORT_SYMBOL_GPL(free_iova);

@@ -415,6 +380,15 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 	unsigned long iova_pfn;
 	struct iova *new_iova;

+	/*
+	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
+	 * will come back to bite us badly, so we have to waste a bit of space
+	 * rounding up anything cacheable to make sure that can't happen. The
+	 * order of the unadjusted size will still match upon freeing.
+	 */
+	if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+		size = roundup_pow_of_two(size);
+
 	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
 	if (iova_pfn)
 		return iova_pfn;
@@ -431,6 +405,7 @@ retry:
 		flush_rcache = false;
 		for_each_online_cpu(cpu)
 			free_cpu_cached_iovas(cpu, iovad);
+		free_global_cached_iovas(iovad);
 		goto retry;
 	}

@@ -456,135 +431,15 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);

-#define fq_ring_for_each(i, fq) \
-	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
-
-static inline bool fq_full(struct iova_fq *fq)
-{
-	assert_spin_locked(&fq->lock);
-	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
-}
-
-static inline unsigned fq_ring_add(struct iova_fq *fq)
-{
-	unsigned idx = fq->tail;
-
-	assert_spin_locked(&fq->lock);
-
-	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
-
-	return idx;
-}
-
-static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
-{
-	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
-	unsigned idx;
-
-	assert_spin_locked(&fq->lock);
-
-	fq_ring_for_each(idx, fq) {
-
-		if (fq->entries[idx].counter >= counter)
-			break;
-
-		if (iovad->entry_dtor)
-			iovad->entry_dtor(fq->entries[idx].data);
-
-		free_iova_fast(iovad,
-			       fq->entries[idx].iova_pfn,
-			       fq->entries[idx].pages);
-
-		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
-	}
-}
-
-static void iova_domain_flush(struct iova_domain *iovad)
-{
-	atomic64_inc(&iovad->fq_flush_start_cnt);
-	iovad->flush_cb(iovad);
-	atomic64_inc(&iovad->fq_flush_finish_cnt);
-}
-
-static void fq_destroy_all_entries(struct iova_domain *iovad)
-{
-	int cpu;
-
-	/*
-	 * This code runs when the iova_domain is being detroyed, so don't
-	 * bother to free iovas, just call the entry_dtor on all remaining
-	 * entries.
-	 */
-	if (!iovad->entry_dtor)
-		return;
-
-	for_each_possible_cpu(cpu) {
-		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
-		int idx;
-
-		fq_ring_for_each(idx, fq)
-			iovad->entry_dtor(fq->entries[idx].data);
-	}
-}
-
-static void fq_flush_timeout(struct timer_list *t)
+static void iova_domain_free_rcaches(struct iova_domain *iovad)
 {
-	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
-	int cpu;
-
-	atomic_set(&iovad->fq_timer_on, 0);
-	iova_domain_flush(iovad);
-
-	for_each_possible_cpu(cpu) {
-		unsigned long flags;
-		struct iova_fq *fq;
-
-		fq = per_cpu_ptr(iovad->fq, cpu);
-		spin_lock_irqsave(&fq->lock, flags);
-		fq_ring_free(iovad, fq);
-		spin_unlock_irqrestore(&fq->lock, flags);
-	}
-}
-
-void queue_iova(struct iova_domain *iovad,
-		unsigned long pfn, unsigned long pages,
-		unsigned long data)
-{
-	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
-	unsigned long flags;
-	unsigned idx;
-
-	spin_lock_irqsave(&fq->lock, flags);
-
-	/*
-	 * First remove all entries from the flush queue that have already been
-	 * flushed out on another CPU. This makes the fq_full() check below less
-	 * likely to be true.
-	 */
-	fq_ring_free(iovad, fq);
-
-	if (fq_full(fq)) {
-		iova_domain_flush(iovad);
-		fq_ring_free(iovad, fq);
-	}
-
-	idx = fq_ring_add(fq);
-
-	fq->entries[idx].iova_pfn = pfn;
-	fq->entries[idx].pages = pages;
-	fq->entries[idx].data = data;
-	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
-
-	spin_unlock_irqrestore(&fq->lock, flags);
-
-	if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
-		mod_timer(&iovad->fq_timer,
-			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+					    &iovad->cpuhp_dead);
+	free_iova_rcaches(iovad);
 }
-EXPORT_SYMBOL_GPL(queue_iova);

 /**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
  * All the iova's in that domain are destroyed.
  */
@@ -592,8 +447,9 @@ void put_iova_domain(struct iova_domain *iovad)
 {
 	struct iova *iova, *tmp;

-	free_iova_flush_queue(iovad);
-	free_iova_rcaches(iovad);
+	if (iovad->rcaches)
+		iova_domain_free_rcaches(iovad);
+
 	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
 		free_iova_mem(iova);
 }
@@ -603,7 +459,7 @@ static int
 __is_range_overlap(struct rb_node *node,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
-	struct iova *iova = rb_entry(node, struct iova, node);
+	struct iova *iova = to_iova(node);

 	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
 		return 1;
@@ -651,7 +507,7 @@ __adjust_overlap_range(struct iova *iova,
  * reserve_iova - reserves an iova in the given range
  * @iovad: - iova domain pointer
  * @pfn_lo: - lower page frame address
- * @pfn_hi:- higher pfn adderss
+ * @pfn_hi:- higher pfn address
  * This function allocates reserves the address range from pfn_lo to pfn_hi so
  * that this address is not dished out as part of alloc_iova.
  */
@@ -671,7 +527,7 @@ reserve_iova(struct iova_domain *iovad,
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-			iova = rb_entry(node, struct iova, node);
+			iova = to_iova(node);
 			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
 			if ((pfn_lo >= iova->pfn_lo) &&
 				(pfn_hi <= iova->pfn_hi))
@@ -693,77 +549,6 @@ finish:
 }
 EXPORT_SYMBOL_GPL(reserve_iova);

-/**
- * copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
- * @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
- * other.
- */
-void
-copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
-{
-	unsigned long flags;
-	struct rb_node *node;
-
-	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
-	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-		struct iova *iova = rb_entry(node, struct iova, node);
-		struct iova *new_iova;
-
-		if (iova->pfn_lo == IOVA_ANCHOR)
-			continue;
-
-		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
-		if (!new_iova)
-			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
-				iova->pfn_lo, iova->pfn_lo);
-	}
-	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
-}
-EXPORT_SYMBOL_GPL(copy_reserved_iova);
-
-struct iova *
-split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
-		      unsigned long pfn_lo, unsigned long pfn_hi)
-{
-	unsigned long flags;
-	struct iova *prev = NULL, *next = NULL;
-
-	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	if (iova->pfn_lo < pfn_lo) {
-		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
-		if (prev == NULL)
-			goto error;
-	}
-	if (iova->pfn_hi > pfn_hi) {
-		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
-		if (next == NULL)
-			goto error;
-	}
-
-	__cached_rbnode_delete_update(iovad, iova);
-	rb_erase(&iova->node, &iovad->rbroot);
-
-	if (prev) {
-		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
-		iova->pfn_lo = pfn_lo;
-	}
-	if (next) {
-		iova_insert_rbtree(&iovad->rbroot, next, NULL);
-		iova->pfn_hi = pfn_hi;
-	}
-	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-	return iova;
-
-error:
-	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-	if (prev)
-		free_iova_mem(prev);
-	return NULL;
-}
-
 /*
  * Magazine caches for IOVA ranges.  For an introduction to magazines,
  * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 *
@@ -772,12 +557,24 @@ error:
  * dynamic size tuning described in the paper.
  */

-#define IOVA_MAG_SIZE 128
+/*
+ * As kmalloc's buffer size is fixed to power of 2, 127 is chosen to
+ * assure size of 'iova_magazine' to be 1024 bytes, so that no memory
+ * will be wasted. Since only full magazines are inserted into the depot,
+ * we don't need to waste PFN capacity on a separate list head either.
+ */
+#define IOVA_MAG_SIZE 127
+
+#define IOVA_DEPOT_DELAY msecs_to_jiffies(100)

 struct iova_magazine {
-	unsigned long size;
+	union {
+		unsigned long size;
+		struct iova_magazine *next;
+	};
 	unsigned long pfns[IOVA_MAG_SIZE];
 };
+static_assert(!(sizeof(struct iova_magazine) & (sizeof(struct iova_magazine) - 1)));

 struct iova_cpu_rcache {
 	spinlock_t lock;
@@ -785,14 +582,36 @@ struct iova_cpu_rcache {
 	struct iova_magazine *prev;
 };

+struct iova_rcache {
+	spinlock_t lock;
+	unsigned int depot_size;
+	struct iova_magazine *depot;
+	struct iova_cpu_rcache __percpu *cpu_rcaches;
+	struct iova_domain *iovad;
+	struct delayed_work work;
+};
+
+static struct kmem_cache *iova_magazine_cache;
+
+unsigned long iova_rcache_range(void)
+{
+	return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
 {
-	return kzalloc(sizeof(struct iova_magazine), flags);
+	struct iova_magazine *mag;
+
+	mag = kmem_cache_alloc(iova_magazine_cache, flags);
+	if (mag)
+		mag->size = 0;
+
+	return mag;
 }

 static void iova_magazine_free(struct iova_magazine *mag)
 {
-	kfree(mag);
+	kmem_cache_free(iova_magazine_cache, mag);
 }

 static void
@@ -801,16 +620,16 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
 	unsigned long flags;
 	int i;

-	if (!mag)
-		return;
-
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

 	for (i = 0 ; i < mag->size; ++i) {
 		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

-		BUG_ON(!iova);
-		private_free_iova(iovad, iova);
+		if (WARN_ON(!iova))
+			continue;
+
+		remove_iova(iovad, iova);
+		free_iova_mem(iova);
 	}

 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
@@ -820,12 +639,12 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)

 static bool iova_magazine_full(struct iova_magazine *mag)
 {
-	return (mag && mag->size == IOVA_MAG_SIZE);
+	return mag->size == IOVA_MAG_SIZE;
 }

 static bool iova_magazine_empty(struct iova_magazine *mag)
 {
-	return (!mag || mag->size == 0);
+	return mag->size == 0;
 }

 static unsigned long iova_magazine_pop(struct iova_magazine *mag,
@@ -834,8 +653,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
 	int i;
 	unsigned long pfn;

-	BUG_ON(iova_magazine_empty(mag));
-
 	/* Only fall back to the rbtree if we have no suitable pfns at all */
 	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
 		if (i == 0)
@@ -850,33 +667,98 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,

 static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
 {
-	BUG_ON(iova_magazine_full(mag));
-
 	mag->pfns[mag->size++] = pfn;
 }

-static void init_iova_rcaches(struct iova_domain *iovad)
+static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache)
+{
+	struct iova_magazine *mag = rcache->depot;
+
+	/*
+	 * As the mag->next pointer is moved to rcache->depot and reset via
+	 * the mag->size assignment, mark it as a transient false positive.
+	 */
+	kmemleak_transient_leak(mag->next);
+	rcache->depot = mag->next;
+	mag->size = IOVA_MAG_SIZE;
+	rcache->depot_size--;
+	return mag;
+}
+
+static void iova_depot_push(struct iova_rcache *rcache, struct iova_magazine *mag)
+{
+	mag->next = rcache->depot;
+	rcache->depot = mag;
+	rcache->depot_size++;
+}
+
+static void iova_depot_work_func(struct work_struct *work)
+{
+	struct iova_rcache *rcache = container_of(work, typeof(*rcache), work.work);
+	struct iova_magazine *mag = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rcache->lock, flags);
+	if (rcache->depot_size > num_online_cpus())
+		mag = iova_depot_pop(rcache);
+	spin_unlock_irqrestore(&rcache->lock, flags);
+
+	if (mag) {
+		iova_magazine_free_pfns(mag, rcache->iovad);
+		iova_magazine_free(mag);
+		schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
+	}
+}
+
+int iova_domain_init_rcaches(struct iova_domain *iovad)
 {
-	struct iova_cpu_rcache *cpu_rcache;
-	struct iova_rcache *rcache;
 	unsigned int cpu;
-	int i;
+	int i, ret;
+
+	iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
+				 sizeof(struct iova_rcache),
+				 GFP_KERNEL);
+	if (!iovad->rcaches)
+		return -ENOMEM;

 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		struct iova_cpu_rcache *cpu_rcache;
+		struct iova_rcache *rcache;
+
 		rcache = &iovad->rcaches[i];
 		spin_lock_init(&rcache->lock);
-		rcache->depot_size = 0;
-		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
-		if (WARN_ON(!rcache->cpu_rcaches))
-			continue;
+		rcache->iovad = iovad;
+		INIT_DELAYED_WORK(&rcache->work, iova_depot_work_func);
+		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
+						     cache_line_size());
+		if (!rcache->cpu_rcaches) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
 		for_each_possible_cpu(cpu) {
 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+			spin_lock_init(&cpu_rcache->lock);
 			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
 			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+			if (!cpu_rcache->loaded || !cpu_rcache->prev) {
+				ret = -ENOMEM;
+				goto out_err;
+			}
 		}
 	}
+
+	ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+					       &iovad->cpuhp_dead);
+	if (ret)
+		goto out_err;
+	return 0;
+
+out_err:
+	free_iova_rcaches(iovad);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);

 /*
  * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
@@ -888,7 +770,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
 {
-	struct iova_magazine *mag_to_free = NULL;
 	struct iova_cpu_rcache *cpu_rcache;
 	bool can_insert = false;
 	unsigned long flags;
@@ -906,13 +787,9 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,

 		if (new_mag) {
 			spin_lock(&rcache->lock);
-			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
-				rcache->depot[rcache->depot_size++] =
-						cpu_rcache->loaded;
-			} else {
-				mag_to_free = cpu_rcache->loaded;
-			}
+			iova_depot_push(rcache, cpu_rcache->loaded);
 			spin_unlock(&rcache->lock);
+			schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);

 			cpu_rcache->loaded = new_mag;
 			can_insert = true;
@@ -924,11 +801,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,

 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

-	if (mag_to_free) {
-		iova_magazine_free_pfns(mag_to_free, iovad);
-		iova_magazine_free(mag_to_free);
-	}
-
 	return can_insert;
 }

@@ -966,9 +838,9 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
			has_pfn = true;
		} else {
			spin_lock(&rcache->lock);
-			if (rcache->depot_size > 0) {
+			if (rcache->depot) {
				iova_magazine_free(cpu_rcache->loaded);
-				cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+				cpu_rcache->loaded = iova_depot_pop(rcache);
				has_pfn = true;
			}
			spin_unlock(&rcache->lock);
@@ -1007,25 +879,30 @@ static void free_iova_rcaches(struct iova_domain *iovad)
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
-	int i, j;

-	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+	for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
+		if (!rcache->cpu_rcaches)
+			break;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
-		for (j = 0; j < rcache->depot_size; ++j)
-			iova_magazine_free(rcache->depot[j]);
+		cancel_delayed_work_sync(&rcache->work);
+		while (rcache->depot)
+			iova_magazine_free(iova_depot_pop(rcache));
	}
+
+	kfree(iovad->rcaches);
+	iovad->rcaches = NULL;
 }

 /*
  * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
  */
-void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
 {
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
@@ -1042,5 +919,92 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
	}
 }

+/*
+ * free all the IOVA ranges of global cache
+ */
+static void free_global_cached_iovas(struct iova_domain *iovad)
+{
+	struct iova_rcache *rcache;
+	unsigned long flags;
+
+	for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		spin_lock_irqsave(&rcache->lock, flags);
+		while (rcache->depot) {
+			struct iova_magazine *mag = iova_depot_pop(rcache);
+
+			iova_magazine_free_pfns(mag, iovad);
+			iova_magazine_free(mag);
+		}
+		spin_unlock_irqrestore(&rcache->lock, flags);
+	}
+}
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+	struct iova_domain *iovad;
+
+	iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+	free_cpu_cached_iovas(cpu, iovad);
+	return 0;
+}
+
+int iova_cache_get(void)
+{
+	int err = -ENOMEM;
+
+	mutex_lock(&iova_cache_mutex);
+	if (!iova_cache_users) {
+		iova_cache = kmem_cache_create("iommu_iova", sizeof(struct iova), 0,
+					       SLAB_HWCACHE_ALIGN, NULL);
+		if (!iova_cache)
+			goto out_err;
+
+		iova_magazine_cache = kmem_cache_create("iommu_iova_magazine",
+							sizeof(struct iova_magazine),
+							0, SLAB_HWCACHE_ALIGN, NULL);
+		if (!iova_magazine_cache)
+			goto out_err;
+
+		err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
+					      NULL, iova_cpuhp_dead);
+		if (err) {
+			pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err));
+			goto out_err;
+		}
+	}
+
+	iova_cache_users++;
+	mutex_unlock(&iova_cache_mutex);
+
+	return 0;
+
+out_err:
+	kmem_cache_destroy(iova_cache);
+	kmem_cache_destroy(iova_magazine_cache);
+	mutex_unlock(&iova_cache_mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+	mutex_lock(&iova_cache_mutex);
+	if (WARN_ON(!iova_cache_users)) {
+		mutex_unlock(&iova_cache_mutex);
+		return;
+	}
+	iova_cache_users--;
+	if (!iova_cache_users) {
+		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
+		kmem_cache_destroy(iova_cache);
+		kmem_cache_destroy(iova_magazine_cache);
+	}
+	mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_DESCRIPTION("IOMMU I/O Virtual Address management");
 MODULE_LICENSE("GPL");
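
The hunks above replace the old fixed-size depot array (bounded by MAX_GLOBAL_MAGS) with an intrusive singly-linked list of full magazines: a full magazine's size field doubles as its next pointer, which is why IOVA_MAG_SIZE drops to 127 and the structure is asserted to be a power of two (127 * 8 + 8 = 1024 bytes on a 64-bit build, so a slab object wastes no padding). The standalone sketch below models only that layout; the struct mirrors the diff, while the depot harness, main() and the fake PFN values are illustrative assumptions rather than kernel code.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define IOVA_MAG_SIZE 127

struct iova_magazine {
	union {
		unsigned long size;		/* valid while the magazine is being filled */
		struct iova_magazine *next;	/* valid only for full magazines parked in the depot */
	};
	unsigned long pfns[IOVA_MAG_SIZE];
};

/* 127 * 8 + 8 = 1024 bytes on LP64: a power of two, so no allocator padding is wasted */
static_assert((sizeof(struct iova_magazine) & (sizeof(struct iova_magazine) - 1)) == 0,
	      "magazine size must be a power of two");

struct depot {
	struct iova_magazine *head;
	unsigned int depot_size;
};

static void depot_push(struct depot *d, struct iova_magazine *mag)
{
	mag->next = d->head;	/* overwrites 'size'; only full magazines are pushed */
	d->head = mag;
	d->depot_size++;
}

static struct iova_magazine *depot_pop(struct depot *d)
{
	struct iova_magazine *mag = d->head;

	d->head = mag->next;
	mag->size = IOVA_MAG_SIZE;	/* restore 'size': a depot magazine is always full */
	d->depot_size--;
	return mag;
}

int main(void)
{
	struct depot d = { 0 };
	struct iova_magazine *mag = calloc(1, sizeof(*mag));

	for (unsigned long pfn = 0; pfn < IOVA_MAG_SIZE; pfn++)
		mag->pfns[mag->size++] = 0x100000 + pfn;	/* fake PFNs for the demo */

	depot_push(&d, mag);
	printf("depot holds %u magazine(s), sizeof(magazine) = %zu bytes\n",
	       d.depot_size, sizeof(struct iova_magazine));

	mag = depot_pop(&d);
	printf("popped a magazine holding %lu cached PFNs\n", mag->size);
	free(mag);
	return 0;
}

In the kernel version the same push/pop pair is additionally guarded by rcache->lock, and iova_depot_work_func() trims the depot back down to roughly one magazine per online CPU after IOVA_DEPOT_DELAY.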
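alloc_iova_fast() now rounds any request smaller than 2^(IOVA_RANGE_CACHE_MAX_SIZE - 1) = 32 pages up to a power of two, so a later free_iova_fast() of the same size always maps back onto the same rcache bucket; with 4 KiB pages the largest cached range is therefore 32 pages (128 KiB), which is what the new iova_rcache_range() helper reports. A minimal sketch of that normalisation, assuming a userspace reimplementation of roundup_pow_of_two purely for illustration:

#include <stdio.h>

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long v = 1;

	while (v < n)
		v <<= 1;
	return v;
}

int main(void)
{
	for (unsigned long size = 1; size <= 40; size++) {
		unsigned long adjusted = size;

		/* only cacheable sizes (below 32 pages) are rounded; larger ones stay exact */
		if (size < (1UL << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
			adjusted = roundup_pow_of_two(size);

		printf("requested %2lu pages -> allocated %2lu pages%s\n",
		       size, adjusted, adjusted == size ? "" : " (rounded for rcache)");
	}
	return 0;
}

The trade-off is a little wasted IOVA space on odd-sized mappings in exchange for cache hits that stay consistent between allocation and free.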
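Because init_iova_domain() no longer sets up the rcaches (and the flush-queue API has moved out of this file entirely), a caller that wants the fast per-CPU caching is now expected to pair init_iova_domain() with the exported iova_domain_init_rcaches(), and to tear down with put_iova_domain() plus iova_cache_put(). The following is only a hedged, driver-side sketch of that sequence under stated assumptions: the example_* names are hypothetical and error handling is abbreviated.

#include <linux/iova.h>
#include <linux/sizes.h>

/* Hypothetical driver-side state; not part of any kernel API. */
static struct iova_domain example_iovad;

static int example_iova_setup(unsigned long start_pfn)
{
	int ret;

	ret = iova_cache_get();		/* take a reference on the shared kmem caches */
	if (ret)
		return ret;

	/* 4 KiB granule; the rcaches are no longer created implicitly here... */
	init_iova_domain(&example_iovad, SZ_4K, start_pfn);

	/* ...so the caller must create them (and the CPU-hotplug hook) explicitly. */
	ret = iova_domain_init_rcaches(&example_iovad);
	if (ret) {
		put_iova_domain(&example_iovad);
		iova_cache_put();
		return ret;
	}
	return 0;
}

static unsigned long example_map_pages(unsigned long nrpages, unsigned long limit_pfn)
{
	/* true = flush per-CPU and global rcaches and retry if the space runs out */
	return alloc_iova_fast(&example_iovad, nrpages, limit_pfn, true);
}

static void example_unmap_pages(unsigned long pfn, unsigned long nrpages)
{
	free_iova_fast(&example_iovad, pfn, nrpages);
}

static void example_iova_teardown(void)
{
	put_iova_domain(&example_iovad);	/* frees the rcaches when they exist */
	iova_cache_put();
}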
