Diffstat (limited to 'include/linux/swiotlb.h')
 -rw-r--r--   include/linux/swiotlb.h | 249
1 file changed, 181 insertions, 68 deletions
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index f6c3638255d5..3dae0f592063 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -8,16 +8,15 @@
 #include <linux/types.h>
 #include <linux/limits.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 
 struct device;
 struct page;
 struct scatterlist;
 
-enum swiotlb_force {
-	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
-	SWIOTLB_FORCE,		/* swiotlb=force */
-	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
-};
+#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
+#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
+#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -36,37 +35,17 @@ enum swiotlb_force {
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 
-extern void swiotlb_init(int verbose);
-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 unsigned long swiotlb_size_or_default(void);
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-extern int swiotlb_late_init_with_default_size(size_t default_size);
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+	int (*remap)(void *tlb, unsigned long nslabs));
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+	int (*remap)(void *tlb, unsigned long nslabs));
 extern void __init swiotlb_update_mem_attributes(void);
 
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
-		size_t mapping_size, size_t alloc_size,
-		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
-		unsigned long attrs);
-
-extern void swiotlb_tbl_unmap_single(struct device *hwdev,
-				     phys_addr_t tlb_addr,
-				     size_t mapping_size,
-				     enum dma_data_direction dir,
-				     unsigned long attrs);
-
-void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
-		size_t size, enum dma_data_direction dir);
-void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
-		size_t size, enum dma_data_direction dir);
-dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-
 #ifdef CONFIG_SWIOTLB
-extern enum swiotlb_force swiotlb_force;
 
 /**
- * struct io_tlb_mem - IO TLB Memory Pool Descriptor
- *
+ * struct io_tlb_pool - IO TLB memory pool descriptor
  * @start:	The start address of the swiotlb memory pool. Used to do a quick
  *		range check to see if the memory was in fact allocated by this
  *		API.
@@ -76,47 +55,118 @@ extern enum swiotlb_force swiotlb_force;
  * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
  *		may be remapped in the memory encrypted case and store virtual
  *		address for bounce buffer operation.
- * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
- *		@end. For default swiotlb, this is command line adjustable via
- *		setup_io_tlb_npages.
- * @used:	The number of used IO TLB block.
- * @list:	The free list describing the number of free entries available
- *		from each index.
- * @index:	The index to start searching in the next round.
- * @orig_addr:	The original address corresponding to a mapped entry.
- * @alloc_size:	Size of the allocated buffer.
- * @lock:	The lock to protect the above data structures in the map and
- *		unmap calls.
+ * @nslabs:	The number of IO TLB slots between @start and @end. For the
+ *		default swiotlb, this can be adjusted with a boot parameter,
+ *		see setup_io_tlb_npages().
+ * @late_alloc:	%true if allocated using the page allocator.
+ * @nareas:	Number of areas in the pool.
+ * @area_nslabs: Number of slots in each area.
+ * @areas:	Array of memory area descriptors.
+ * @slots:	Array of slot descriptors.
+ * @node:	Member of the IO TLB memory pool list.
+ * @rcu:	RCU head for swiotlb_dyn_free().
+ * @transient:	%true if transient memory pool.
+ */
+struct io_tlb_pool {
+	phys_addr_t start;
+	phys_addr_t end;
+	void *vaddr;
+	unsigned long nslabs;
+	bool late_alloc;
+	unsigned int nareas;
+	unsigned int area_nslabs;
+	struct io_tlb_area *areas;
+	struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+	struct list_head node;
+	struct rcu_head rcu;
+	bool transient;
+#endif
+};
+
+/**
+ * struct io_tlb_mem - Software IO TLB allocator
+ * @defpool:	Default (initial) IO TLB memory pool descriptor.
+ * @pool:	IO TLB memory pool descriptor (if not dynamic).
+ * @nslabs:	Total number of IO TLB slabs in all pools.
  * @debugfs:	The dentry to debugfs.
- * @late_alloc:	%true if allocated using the page allocator
  * @force_bounce: %true if swiotlb bouncing is forced
  * @for_alloc:	%true if the pool is used for memory allocation
+ * @can_grow:	%true if more pools can be allocated dynamically.
+ * @phys_limit:	Maximum allowed physical address.
+ * @lock:	Lock to synchronize changes to the list.
+ * @pools:	List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc:	Dynamic IO TLB pool allocation work.
+ * @total_used:	The total number of slots in the pool that are currently used
+ *		across all areas. Used only for calculating used_hiwater in
+ *		debugfs.
+ * @used_hiwater: The high water mark for total_used. Used only for reporting
+ *		in debugfs.
+ * @transient_nslabs: The total number of slots in all transient pools that
+ *		are currently used across all areas.
  */
 struct io_tlb_mem {
-	phys_addr_t start;
-	phys_addr_t end;
-	void *vaddr;
+	struct io_tlb_pool defpool;
 	unsigned long nslabs;
-	unsigned long used;
-	unsigned int index;
-	spinlock_t lock;
 	struct dentry *debugfs;
-	bool late_alloc;
 	bool force_bounce;
 	bool for_alloc;
-	struct io_tlb_slot {
-		phys_addr_t orig_addr;
-		size_t alloc_size;
-		unsigned int list;
-	} *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+	bool can_grow;
+	u64 phys_limit;
+	spinlock_t lock;
+	struct list_head pools;
+	struct work_struct dyn_alloc;
+#endif
+#ifdef CONFIG_DEBUG_FS
+	atomic_long_t total_used;
+	atomic_long_t used_hiwater;
+	atomic_long_t transient_nslabs;
+#endif
 };
-extern struct io_tlb_mem io_tlb_default_mem;
 
-static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
+struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
+
+/**
+ * swiotlb_find_pool() - find swiotlb pool to which a physical address belongs
+ * @dev:	Device which has mapped the buffer.
+ * @paddr:	Physical address within the DMA buffer.
+ *
+ * Find the swiotlb pool that @paddr points into.
+ *
+ * Return:
+ * * pool address if @paddr points into a bounce buffer
+ * * NULL if @paddr does not point into a bounce buffer. As such, this function
+ *   can be used to determine if @paddr denotes a swiotlb bounce buffer.
+ */
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+		phys_addr_t paddr)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 
-	return mem && paddr >= mem->start && paddr < mem->end;
+	if (!mem)
+		return NULL;
+
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+	/*
+	 * All SWIOTLB buffer addresses must have been returned by
+	 * swiotlb_tbl_map_single() and passed to a device driver.
+	 * If a SWIOTLB address is checked on another CPU, then it was
+	 * presumably loaded by the device driver from an unspecified private
	 * data structure. Make sure that this load is ordered before reading
+	 * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool().
+	 *
+	 * This barrier pairs with smp_mb() in swiotlb_find_slots().
+	 */
+	smp_rmb();
+	if (READ_ONCE(dev->dma_uses_io_tlb))
+		return __swiotlb_find_pool(dev, paddr);
+#else
+	if (paddr >= mem->defpool.start && paddr < mem->defpool.end)
+		return &mem->defpool;
+#endif
+
+	return NULL;
 }
 
 static inline bool is_swiotlb_force_bounce(struct device *dev)
@@ -126,16 +176,28 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
 	return mem && mem->force_bounce;
 }
 
+void swiotlb_init(bool addressing_limited, unsigned int flags);
 void __init swiotlb_exit(void);
-unsigned int swiotlb_max_segment(void);
+void swiotlb_dev_init(struct device *dev);
 size_t swiotlb_max_mapping_size(struct device *dev);
+bool is_swiotlb_allocated(void);
 bool is_swiotlb_active(struct device *dev);
 void __init swiotlb_adjust_size(unsigned long size);
+phys_addr_t default_swiotlb_base(void);
+phys_addr_t default_swiotlb_limit(void);
 #else
-#define swiotlb_force SWIOTLB_NO_FORCE
-static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
+static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
 {
-	return false;
+}
+
+static inline void swiotlb_dev_init(struct device *dev)
+{
+}
+
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+		phys_addr_t paddr)
+{
+	return NULL;
 }
 static inline bool is_swiotlb_force_bounce(struct device *dev)
 {
@@ -144,15 +206,16 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
 static inline void swiotlb_exit(void)
 {
 }
-static inline unsigned int swiotlb_max_segment(void)
-{
-	return 0;
-}
 static inline size_t swiotlb_max_mapping_size(struct device *dev)
 {
 	return SIZE_MAX;
 }
 
+static inline bool is_swiotlb_allocated(void)
+{
+	return false;
+}
+
 static inline bool is_swiotlb_active(struct device *dev)
 {
 	return false;
@@ -161,10 +224,62 @@
 static inline void swiotlb_adjust_size(unsigned long size)
 {
 }
+
+static inline phys_addr_t default_swiotlb_base(void)
+{
+	return 0;
+}
+
+static inline phys_addr_t default_swiotlb_limit(void)
+{
+	return 0;
+}
 #endif /* CONFIG_SWIOTLB */
 
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+		size_t mapping_size, unsigned int alloc_aligned_mask,
+		enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+
+void __swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+		size_t mapping_size, enum dma_data_direction dir,
+		unsigned long attrs, struct io_tlb_pool *pool);
+static inline void swiotlb_tbl_unmap_single(struct device *dev,
+		phys_addr_t addr, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);
+
+	if (unlikely(pool))
+		__swiotlb_tbl_unmap_single(dev, addr, size, dir, attrs, pool);
+}
+
+void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+		size_t size, enum dma_data_direction dir,
+		struct io_tlb_pool *pool);
+static inline void swiotlb_sync_single_for_device(struct device *dev,
+		phys_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);
+
+	if (unlikely(pool))
+		__swiotlb_sync_single_for_device(dev, addr, size, dir, pool);
+}
+
+void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+		size_t size, enum dma_data_direction dir,
+		struct io_tlb_pool *pool);
+static inline void swiotlb_sync_single_for_cpu(struct device *dev,
+		phys_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);
+
+	if (unlikely(pool))
+		__swiotlb_sync_single_for_cpu(dev, addr, size, dir, pool);
+}
+
 extern void swiotlb_print_info(void);
-extern void swiotlb_set_max_segment(unsigned int);
 
 #ifdef CONFIG_DMA_RESTRICTED_POOL
 struct page *swiotlb_alloc(struct device *dev, size_t size);
@@ -190,6 +305,4 @@ static inline bool is_swiotlb_for_alloc(struct device *dev)
 }
 #endif /* CONFIG_DMA_RESTRICTED_POOL */
 
-extern phys_addr_t swiotlb_unencrypted_base;
-
 #endif /* __LINUX_SWIOTLB_H */
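
The diff replaces the global "enum swiotlb_force" knob with flag bits passed to the new two-argument swiotlb_init(). As a rough illustration of the new calling convention, early architecture setup code might use it as sketched below; example_arch_mem_init() and its force_bounce parameter are hypothetical placeholders, while swiotlb_init(), SWIOTLB_VERBOSE and SWIOTLB_FORCE are the interfaces from the header above.

/* Illustrative sketch only: example_arch_mem_init() is a made-up arch hook. */
#include <linux/init.h>
#include <linux/swiotlb.h>

void __init example_arch_mem_init(bool force_bounce)
{
	unsigned int flags = SWIOTLB_VERBOSE;

	/* Old code toggled the global "swiotlb_force" enum instead. */
	if (force_bounce)
		flags |= SWIOTLB_FORCE;

	/* true = the platform has devices with limited DMA addressing */
	swiotlb_init(true, flags);
}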
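The inline wrappers added at the end of the header fold the old is_swiotlb_buffer() test into a single swiotlb_find_pool() lookup: a NULL result means the address was never bounced, so unmap and sync become no-ops. A caller in a dma-direct style unmap path might use them roughly as below; example_unmap_page() is hypothetical, while dma_to_phys() and swiotlb_tbl_unmap_single() are existing interfaces.

/* Sketch of a caller; example_unmap_page() is not a real kernel function. */
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>

static void example_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, dma_addr);

	/*
	 * swiotlb_tbl_unmap_single() looks up the pool itself via
	 * swiotlb_find_pool(); if @phys was never bounced the lookup
	 * returns NULL and the call does nothing.
	 */
	swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

With CONFIG_SWIOTLB_DYNAMIC the lookup goes through __swiotlb_find_pool() behind the smp_rmb() documented in the header, so for a device that has never used a bounce buffer the check stays cheap: a read barrier plus one dev->dma_uses_io_tlb flag load.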
