author     Linus Torvalds <torvalds@linux-foundation.org>   2023-08-29 20:32:10 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-08-29 20:32:10 -0700
commit     6c1b980a7e79e55e951b4b2c47eefebc75071209 (patch)
tree       75fadaa5bc0b8bb1b488bd107926cdb6373c3946 /include
parent     3d3dfeb3aec7b612d266d500c82054f1fded4980 (diff)
parent     d069ed288ac74c24e2b1c294aa9445c80ed6c518 (diff)
Merge tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - allow dynamic sizing of the swiotlb buffer, to cater for secure
   virtualization workloads that require all I/O to be bounce buffered
   (Petr Tesarik)

 - move a declaration to a header (Arnd Bergmann)

 - check for memory region overlap in dma-contiguous (Binglei Wang)

 - remove the somewhat dangerous runtime swiotlb-xen enablement and
   unexport is_swiotlb_active (Christoph Hellwig, Juergen Gross)

 - per-node CMA improvements (Yajun Deng)

* tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: optimize get_max_slots()
  swiotlb: move slot allocation explanation comment where it belongs
  swiotlb: search the software IO TLB only if the device makes use of it
  swiotlb: allocate a new memory pool when existing pools are full
  swiotlb: determine potential physical address limit
  swiotlb: if swiotlb is full, fall back to a transient memory pool
  swiotlb: add a flag whether SWIOTLB is allowed to grow
  swiotlb: separate memory pool data from other allocator data
  swiotlb: add documentation and rename swiotlb_do_find_slots()
  swiotlb: make io_tlb_default_mem local to swiotlb.c
  swiotlb: bail out of swiotlb_init_late() if swiotlb is already allocated
  dma-contiguous: check for memory region overlap
  dma-contiguous: support numa CMA for specified node
  dma-contiguous: support per-numa CMA for all architectures
  dma-mapping: move arch_dma_set_mask() declaration to header
  swiotlb: unexport is_swiotlb_active
  x86: always initialize xen-swiotlb when xen-pcifront is enabling
  xen/pci: add flag for PCI passthrough being possible
Diffstat (limited to 'include')
-rw-r--r--   include/linux/device.h         10
-rw-r--r--   include/linux/dma-map-ops.h    12
-rw-r--r--   include/linux/dma-mapping.h     2
-rw-r--r--   include/linux/swiotlb.h       131
-rw-r--r--   include/xen/xen.h               6
5 files changed, 132 insertions, 29 deletions
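
Taken together, the header changes below add a small query API alongside the dynamic-pool plumbing: is_swiotlb_allocated(), default_swiotlb_base() and default_swiotlb_limit(), all declared in the swiotlb.h hunk further down. The following is a minimal, hypothetical caller sketch; the function name and message are illustrative and not part of this merge, and the stubs return false/0 when CONFIG_SWIOTLB is disabled.

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/swiotlb.h>

/* Hypothetical helper: report the default software IO TLB, if one exists. */
static void __init example_report_swiotlb(void)
{
	phys_addr_t base, limit;

	if (!is_swiotlb_allocated()) {
		pr_info("swiotlb: no default bounce buffer allocated\n");
		return;
	}

	base = default_swiotlb_base();
	limit = default_swiotlb_limit();
	/* %pa prints a phys_addr_t passed by reference */
	pr_info("swiotlb: default pool spans %pa..%pa\n", &base, &limit);
}
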
diff --git a/include/linux/device.h b/include/linux/device.h
index bbaeabd04b0d..6fc808d22bfd 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -625,7 +625,10 @@ struct device_physical_location {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
- * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
+ * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
+ * @dma_io_tlb_pools: List of transient swiotlb memory pools.
+ * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -732,6 +735,11 @@ struct device {
#ifdef CONFIG_SWIOTLB
struct io_tlb_mem *dma_io_tlb_mem;
#endif
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head dma_io_tlb_pools;
+ spinlock_t dma_io_tlb_lock;
+ bool dma_uses_io_tlb;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
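
The three new per-device fields above need to be set up when a device is registered; the swiotlb.h hunk below adds swiotlb_dev_init() for that purpose. Here is a sketch of what such an initializer plausibly does, assuming the usual list/spinlock setup; the body is an assumption, only the field names come from this hunk.

#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Plausible per-device setup for the dynamic swiotlb fields (assumed body). */
static inline void example_swiotlb_dev_init(struct device *dev)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
	spin_lock_init(&dev->dma_io_tlb_lock);
	dev->dma_uses_io_tlb = false;
#endif
}
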
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 9bf19b5bf755..f2fc203fb8a1 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -169,12 +169,6 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
}
#endif /* CONFIG_DMA_CMA*/
-#ifdef CONFIG_DMA_PERNUMA_CMA
-void dma_pernuma_cma_reserve(void);
-#else
-static inline void dma_pernuma_cma_reserve(void) { }
-#endif /* CONFIG_DMA_PERNUMA_CMA */
-
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
@@ -343,6 +337,12 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
+void arch_dma_set_mask(struct device *dev, u64 mask);
+#else
+#define arch_dma_set_mask(dev, mask) do { } while (0)
+#endif
+
#ifdef CONFIG_MMU
/*
* Page protection so that devices that can't snoop CPU caches can use the
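
Moving the arch_dma_set_mask() declaration here, with a do-while(0) stub when CONFIG_ARCH_HAS_DMA_SET_MASK is not set, lets the generic mask-setting path call the arch hook unconditionally. A hedged sketch of that caller pattern, simplified and not the exact kernel code:

#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>

/* Simplified caller sketch: the stub compiles away on most architectures. */
int example_dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask)
		return -EIO;

	arch_dma_set_mask(dev, mask);	/* no-op unless the arch provides a hook */
	*dev->dma_mask = mask;
	return 0;
}
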
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e13050eb9777..f0ccca16a0ac 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -418,6 +418,8 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
+
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 4e52cd5e0bdc..b4536626f8ff 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
struct device;
struct page;
@@ -62,8 +63,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
#ifdef CONFIG_SWIOTLB
/**
- * struct io_tlb_mem - IO TLB Memory Pool Descriptor
- *
+ * struct io_tlb_pool - IO TLB memory pool descriptor
* @start: The start address of the swiotlb memory pool. Used to do a quick
* range check to see if the memory was in fact allocated by this
* API.
@@ -73,19 +73,48 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* @vaddr: The vaddr of the swiotlb memory pool. The swiotlb memory pool
* may be remapped in the memory encrypted case and store virtual
* address for bounce buffer operation.
- * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
- * @end. For default swiotlb, this is command line adjustable via
- * setup_io_tlb_npages.
- * @list: The free list describing the number of free entries available
- * from each index.
- * @orig_addr: The original address corresponding to a mapped entry.
- * @alloc_size: Size of the allocated buffer.
+ * @nslabs: The number of IO TLB slots between @start and @end. For the
+ * default swiotlb, this can be adjusted with a boot parameter,
+ * see setup_io_tlb_npages().
+ * @late_alloc: %true if allocated using the page allocator.
+ * @nareas: Number of areas in the pool.
+ * @area_nslabs: Number of slots in each area.
+ * @areas: Array of memory area descriptors.
+ * @slots: Array of slot descriptors.
+ * @node: Member of the IO TLB memory pool list.
+ * @rcu: RCU head for swiotlb_dyn_free().
+ * @transient: %true if transient memory pool.
+ */
+struct io_tlb_pool {
+ phys_addr_t start;
+ phys_addr_t end;
+ void *vaddr;
+ unsigned long nslabs;
+ bool late_alloc;
+ unsigned int nareas;
+ unsigned int area_nslabs;
+ struct io_tlb_area *areas;
+ struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head node;
+ struct rcu_head rcu;
+ bool transient;
+#endif
+};
+
+/**
+ * struct io_tlb_mem - Software IO TLB allocator
+ * @defpool: Default (initial) IO TLB memory pool descriptor.
+ * @pool: IO TLB memory pool descriptor (if not dynamic).
+ * @nslabs: Total number of IO TLB slabs in all pools.
* @debugfs: The dentry to debugfs.
- * @late_alloc: %true if allocated using the page allocator
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
- * @nareas: The area number in the pool.
- * @area_nslabs: The slot number in the area.
+ * @can_grow: %true if more pools can be allocated dynamically.
+ * @phys_limit: Maximum allowed physical address.
+ * @lock: Lock to synchronize changes to the list.
+ * @pools: List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc: Dynamic IO TLB pool allocation work.
* @total_used: The total number of slots in the pool that are currently used
* across all areas. Used only for calculating used_hiwater in
* debugfs.
@@ -93,30 +122,64 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* in debugfs.
*/
struct io_tlb_mem {
- phys_addr_t start;
- phys_addr_t end;
- void *vaddr;
+ struct io_tlb_pool defpool;
unsigned long nslabs;
struct dentry *debugfs;
- bool late_alloc;
bool force_bounce;
bool for_alloc;
- unsigned int nareas;
- unsigned int area_nslabs;
- struct io_tlb_area *areas;
- struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ bool can_grow;
+ u64 phys_limit;
+ spinlock_t lock;
+ struct list_head pools;
+ struct work_struct dyn_alloc;
+#endif
#ifdef CONFIG_DEBUG_FS
atomic_long_t total_used;
atomic_long_t used_hiwater;
#endif
};
-extern struct io_tlb_mem io_tlb_default_mem;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
+
+#else
+
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+ phys_addr_t paddr)
+{
+ return &dev->dma_io_tlb_mem->defpool;
+}
+
+#endif
+
+/**
+ * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
+ * @dev: Device which has mapped the buffer.
+ * @paddr: Physical address within the DMA buffer.
+ *
+ * Check if @paddr points into a bounce buffer.
+ *
+ * Return:
+ * * %true if @paddr points into a bounce buffer
+ * * %false otherwise
+ */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
- return mem && paddr >= mem->start && paddr < mem->end;
+ if (!mem)
+ return false;
+
+ if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
+ /* Pairs with smp_wmb() in swiotlb_find_slots() and
+ * swiotlb_dyn_alloc(), which modify the RCU lists.
+ */
+ smp_rmb();
+ return swiotlb_find_pool(dev, paddr);
+ }
+ return paddr >= mem->defpool.start && paddr < mem->defpool.end;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
@@ -128,13 +191,22 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
+void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
+bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
+phys_addr_t default_swiotlb_base(void);
+phys_addr_t default_swiotlb_limit(void);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
+
+static inline void swiotlb_dev_init(struct device *dev)
+{
+}
+
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
return false;
@@ -151,6 +223,11 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+static inline bool is_swiotlb_allocated(void)
+{
+ return false;
+}
+
static inline bool is_swiotlb_active(struct device *dev)
{
return false;
@@ -159,6 +236,16 @@ static inline bool is_swiotlb_active(struct device *dev)
static inline void swiotlb_adjust_size(unsigned long size)
{
}
+
+static inline phys_addr_t default_swiotlb_base(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t default_swiotlb_limit(void)
+{
+ return 0;
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
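
With CONFIG_SWIOTLB_DYNAMIC, is_swiotlb_buffer() above now pairs an smp_rmb() with the writers and walks the per-device pool list via swiotlb_find_pool() instead of doing a single start/end check. The usual caller pattern stays the same; below is a sketch modeled on the dma-direct sync path, simplified and not the exact kernel code.

#include <linux/dma-direct.h>
#include <linux/dma-direction.h>
#include <linux/swiotlb.h>

/* Sync-for-CPU sketch: bounce back only if paddr really is a swiotlb buffer. */
static void example_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (is_swiotlb_buffer(dev, paddr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}
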
diff --git a/include/xen/xen.h b/include/xen/xen.h
index f989162983c3..a1e5b3f18d69 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,6 +29,12 @@ extern bool xen_pvh;
extern uint32_t xen_start_flags;
+#ifdef CONFIG_XEN_PV
+extern bool xen_pv_pci_possible;
+#else
+#define xen_pv_pci_possible 0
+#endif
+
#include <xen/interface/hvm/start_info.h>
extern struct hvm_start_info pvh_start_info;
void xen_prepare_pvh(void);
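
xen_pv_pci_possible lets x86 boot code decide to prepare xen-swiotlb in a PV domU that may receive passed-through PCI devices via pcifront, even when no PCI device is visible at boot. A hedged sketch of that decision, assuming the existing xen_pv_domain()/xen_initial_domain() predicates; the function name and exact condition are illustrative.

#include <xen/xen.h>

/* Illustrative check: does this domain want xen-swiotlb prepared at boot? */
static bool example_want_xen_swiotlb(void)
{
	if (!xen_pv_domain())
		return false;

	/* Dom0 drives real hardware; a domU only needs it if passthrough is possible. */
	return xen_initial_domain() || xen_pv_pci_possible;
}
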