Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--   drivers/dma-buf/Kconfig               |   1
-rw-r--r--   drivers/dma-buf/dma-fence-chain.c     |   7
-rw-r--r--   drivers/dma-buf/dma-fence.c           | 167
-rw-r--r--   drivers/dma-buf/dma-resv.c            |  12
-rw-r--r--   drivers/dma-buf/heaps/Kconfig         |  10
-rw-r--r--   drivers/dma-buf/heaps/cma_heap.c      |  36
-rw-r--r--   drivers/dma-buf/heaps/system_heap.c   |  43
-rw-r--r--   drivers/dma-buf/sw_sync.c             |   2
-rw-r--r--   drivers/dma-buf/sync_file.c           |  24
-rw-r--r--   drivers/dma-buf/udmabuf.c             |  23
10 files changed, 240 insertions, 85 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index fee04fdb0822..b46eb8a552d7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,7 +36,6 @@ config UDMABUF
 	depends on DMA_SHARED_BUFFER
 	depends on MEMFD_CREATE || COMPILE_TEST
 	depends on MMU
-	select VMAP_PFN
 	help
 	  A driver to let userspace turn memfd regions into dma-bufs.
 	  Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac..a8a90acf4f34 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence,
 }
 
 const struct dma_fence_ops dma_fence_chain_ops = {
-	.use_64bit_seqno = true,
 	.get_driver_name = dma_fence_chain_get_driver_name,
 	.get_timeline_name = dma_fence_chain_get_timeline_name,
 	.enable_signaling = dma_fence_chain_enable_signaling,
@@ -252,7 +251,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
 	chain->prev_seqno = 0;
 
 	/* Try to reuse the context of the previous chain node. */
-	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+	if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
 		context = prev->context;
 		chain->prev_seqno = prev->seqno;
 	} else {
@@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
 		seqno = max(prev->seqno, seqno);
 	}
 
-	dma_fence_init(&chain->base, &dma_fence_chain_ops,
-		       &chain->lock, context, seqno);
+	dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
+			 context, seqno);
 
 	/*
 	 * Chaining dma_fence_chain container together is only allowed through
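The hunks above move __dma_fence_is_later() to a calling convention that takes the fence itself first, so the 64-bit sequence number handling is derived from the fence rather than from dma_fence_ops, and dma-fence-chain now marks its fences through dma_fence_init64(). A minimal sketch of the new convention, in the style of sw_sync's timeline_fence_signaled() further down; the my_* names are hypothetical and not part of this patch:

#include <linux/dma-fence.h>

struct my_timeline {
        u64 value;      /* last signalled point on this hypothetical timeline */
};

static bool my_point_signaled(struct my_timeline *tl, struct dma_fence *fence)
{
        /* fence->seqno still later than tl->value means the point is pending */
        return !__dma_fence_is_later(fence, fence->seqno, tl->value);
}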
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index f0cdd3e99d36..3f78c56b58dc 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -511,12 +511,20 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 
 	dma_fence_enable_sw_signaling(fence);
 
-	trace_dma_fence_wait_start(fence);
+	if (trace_dma_fence_wait_start_enabled()) {
+		rcu_read_lock();
+		trace_dma_fence_wait_start(fence);
+		rcu_read_unlock();
+	}
 	if (fence->ops->wait)
 		ret = fence->ops->wait(fence, intr, timeout);
 	else
 		ret = dma_fence_default_wait(fence, intr, timeout);
-	trace_dma_fence_wait_end(fence);
+	if (trace_dma_fence_wait_end_enabled()) {
+		rcu_read_lock();
+		trace_dma_fence_wait_end(fence);
+		rcu_read_unlock();
+	}
 	return ret;
 }
 EXPORT_SYMBOL(dma_fence_wait_timeout);
@@ -533,16 +541,23 @@ void dma_fence_release(struct kref *kref)
 	struct dma_fence *fence =
 		container_of(kref, struct dma_fence, refcount);
 
+	rcu_read_lock();
 	trace_dma_fence_destroy(fence);
 
-	if (WARN(!list_empty(&fence->cb_list) &&
-		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
-		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
-		 fence->ops->get_driver_name(fence),
-		 fence->ops->get_timeline_name(fence),
-		 fence->context, fence->seqno)) {
+	if (!list_empty(&fence->cb_list) &&
+	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		const char __rcu *timeline;
+		const char __rcu *driver;
 		unsigned long flags;
 
+		driver = dma_fence_driver_name(fence);
+		timeline = dma_fence_timeline_name(fence);
+
+		WARN(1,
+		     "Fence %s:%s:%llx:%llx released with pending signals!\n",
+		     rcu_dereference(driver), rcu_dereference(timeline),
+		     fence->context, fence->seqno);
+
 		/*
 		 * Failed to signal before release, likely a refcounting issue.
 		 *
@@ -556,6 +571,8 @@ void dma_fence_release(struct kref *kref)
 		spin_unlock_irqrestore(fence->lock, flags);
 	}
 
+	rcu_read_unlock();
+
 	if (fence->ops->release)
 		fence->ops->release(fence);
 	else
@@ -982,13 +999,43 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
  */
 void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
 {
+	const char __rcu *timeline;
+	const char __rcu *driver;
+
+	rcu_read_lock();
+
+	timeline = dma_fence_timeline_name(fence);
+	driver = dma_fence_driver_name(fence);
+
 	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
-		   fence->ops->get_driver_name(fence),
-		   fence->ops->get_timeline_name(fence), fence->seqno,
+		   rcu_dereference(driver),
+		   rcu_dereference(timeline),
+		   fence->seqno,
 		   dma_fence_is_signaled(fence) ? "" : "un");
+
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(dma_fence_describe);
 
+static void
+__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+		 spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
+{
+	BUG_ON(!lock);
+	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+	kref_init(&fence->refcount);
+	fence->ops = ops;
+	INIT_LIST_HEAD(&fence->cb_list);
+	fence->lock = lock;
+	fence->context = context;
+	fence->seqno = seqno;
+	fence->flags = flags;
+	fence->error = 0;
+
+	trace_dma_fence_init(fence);
+}
+
 /**
  * dma_fence_init - Initialize a custom fence.
  * @fence: the fence to initialize
@@ -1008,18 +1055,94 @@ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
 		    spinlock_t *lock, u64 context, u64 seqno)
 {
-	BUG_ON(!lock);
-	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+	__dma_fence_init(fence, ops, lock, context, seqno, 0UL);
+}
+EXPORT_SYMBOL(dma_fence_init);
 
-	kref_init(&fence->refcount);
-	fence->ops = ops;
-	INIT_LIST_HEAD(&fence->cb_list);
-	fence->lock = lock;
-	fence->context = context;
-	fence->seqno = seqno;
-	fence->flags = 0UL;
-	fence->error = 0;
+/**
+ * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence, the caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * Context and seqno are used for easy comparison between fences, allowing
+ * to check which fence is later by simply using dma_fence_later().
+ */
+void
+dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+		 spinlock_t *lock, u64 context, u64 seqno)
+{
+	__dma_fence_init(fence, ops, lock, context, seqno,
+			 BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
+}
+EXPORT_SYMBOL(dma_fence_init64);
 
-	trace_dma_fence_init(fence);
+/**
+ * dma_fence_driver_name - Access the driver name
+ * @fence: the fence to query
+ *
+ * Returns a driver name backing the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access drivers are mandated to guarantee a RCU grace period
+ * between signalling the fence and freeing said data.
+ *
+ * As such access to the driver name is only valid inside a RCU locked section.
+ * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
+ * by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
+{
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+			 "RCU protection is required for safe access to returned string");
+
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return fence->ops->get_driver_name(fence);
+	else
+		return "detached-driver";
 }
-EXPORT_SYMBOL(dma_fence_init);
+EXPORT_SYMBOL(dma_fence_driver_name);
+
+/**
+ * dma_fence_timeline_name - Access the timeline name
+ * @fence: the fence to query
+ *
+ * Returns a timeline name provided by the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access drivers are mandated to guarantee a RCU grace period
+ * between signalling the fence and freeing said data.
+ *
+ * As such access to the timeline name is only valid inside a RCU locked
+ * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
+ * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
+{
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+			 "RCU protection is required for safe access to returned string");
+
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return fence->ops->get_timeline_name(fence);
+	else
+		return "signaled-timeline";
+}
+EXPORT_SYMBOL(dma_fence_timeline_name);
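The dma-fence.c changes above replace the per-ops use_64bit_seqno setting with a per-fence DMA_FENCE_FLAG_SEQNO64_BIT that dma_fence_init64() sets, and add RCU-protected name helpers. A minimal sketch of a driver-private fence created on a 64-bit timeline; all my_* names are hypothetical and only illustrate the new entry point:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static const char *my_get_driver_name(struct dma_fence *fence)
{
        return "my-driver";
}

static const char *my_get_timeline_name(struct dma_fence *fence)
{
        return "my-timeline";
}

static const struct dma_fence_ops my_fence_ops = {
        /* no .use_64bit_seqno here; the init variant decides */
        .get_driver_name = my_get_driver_name,
        .get_timeline_name = my_get_timeline_name,
};

static DEFINE_SPINLOCK(my_fence_lock);

static struct dma_fence *my_fence_create(u64 context, u64 seqno)
{
        struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

        if (!fence)
                return NULL;

        /* Sets DMA_FENCE_FLAG_SEQNO64_BIT instead of relying on the ops. */
        dma_fence_init64(fence, &my_fence_ops, &my_fence_lock, context, seqno);
        return fence;
}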
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index b1ef4546346d..bea3e9858aca 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -685,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
 	dma_resv_iter_begin(&cursor, obj, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
-		ret = dma_fence_wait_timeout(fence, intr, ret);
-		if (ret <= 0) {
-			dma_resv_iter_end(&cursor);
-			return ret;
-		}
+		ret = dma_fence_wait_timeout(fence, intr, timeout);
+		if (ret <= 0)
+			break;
+
+		/* Even for zero timeout the return value is 1 */
+		if (timeout)
+			timeout = ret;
 	}
 	dma_resv_iter_end(&cursor);
 
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06c4226..bb369b38b001 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,3 +12,13 @@ config DMABUF_HEAPS_CMA
 	  Choose this option to enable dma-buf CMA heap. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
+
+config DMABUF_HEAPS_CMA_LEGACY
+	bool "Legacy DMA-BUF CMA Heap"
+	default y
+	depends on DMABUF_HEAPS_CMA
+	help
+	  Add a duplicate CMA-backed dma-buf heap with legacy naming derived
+	  from the CMA area's devicetree node, or "reserved" if the area is not
+	  defined in the devicetree. This uses the same underlying allocator as
+	  CONFIG_DMABUF_HEAPS_CMA.
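The dma-resv.c fix above stops shrinking each per-fence wait with the running return value and instead reuses the caller's timeout, only folding the remaining jiffies back when a real timeout was given, so a zero timeout stays a non-blocking poll. A hedged caller-side sketch (my_wait_idle() is hypothetical) of how the return value is typically consumed:

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int my_wait_idle(struct dma_resv *resv, bool nonblock)
{
        long ret;

        ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
                                    true /* interruptible */,
                                    nonblock ? 0 : msecs_to_jiffies(100));
        if (ret == 0)
                return -EBUSY;          /* timed out, or still busy when polling */
        if (ret < 0)
                return ret;             /* e.g. -ERESTARTSYS */

        return 0;                       /* all fences signalled */
}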
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 9512d050563a..0df007111975 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -9,6 +9,9 @@
  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
  *	Andrew F. Davis <afd@ti.com>
  */
+
+#define pr_fmt(fmt) "cma_heap: " fmt
+
 #include <linux/cma.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
@@ -22,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+#define DEFAULT_CMA_NAME "default_cma_region"
 
 struct cma_heap {
 	struct dma_heap *heap;
@@ -366,17 +370,17 @@ static const struct dma_heap_ops cma_heap_ops = {
 	.allocate = cma_heap_allocate,
 };
 
-static int __init __add_cma_heap(struct cma *cma, void *data)
+static int __init __add_cma_heap(struct cma *cma, const char *name)
 {
-	struct cma_heap *cma_heap;
 	struct dma_heap_export_info exp_info;
+	struct cma_heap *cma_heap;
 
 	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
 	if (!cma_heap)
 		return -ENOMEM;
 	cma_heap->cma = cma;
 
-	exp_info.name = cma_get_name(cma);
+	exp_info.name = name;
 	exp_info.ops = &cma_heap_ops;
 	exp_info.priv = cma_heap;
@@ -394,12 +398,30 @@ static int __init __add_cma_heap(struct cma *cma, void *data)
 static int __init add_default_cma_heap(void)
 {
 	struct cma *default_cma = dev_get_cma_area(NULL);
-	int ret = 0;
+	const char *legacy_cma_name;
+	int ret;
 
-	if (default_cma)
-		ret = __add_cma_heap(default_cma, NULL);
+	if (!default_cma)
+		return 0;
 
-	return ret;
+	ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) {
+		legacy_cma_name = cma_get_name(default_cma);
+		if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) {
+			pr_warn("legacy name and default name are the same, skipping legacy heap\n");
+			return 0;
+		}
+
+		ret = __add_cma_heap(default_cma, legacy_cma_name);
+		if (ret)
+			pr_warn("failed to add legacy heap: %pe\n",
+				ERR_PTR(ret));
+	}
+
+	return 0;
 }
 module_init(add_default_cma_heap);
 MODULE_DESCRIPTION("DMA-BUF CMA Heap");
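With the change above the default CMA heap is always exported as "default_cma_region", while the old devicetree-derived name is only kept when DMABUF_HEAPS_CMA_LEGACY is enabled. A userspace sketch of allocating from the new node with a fallback to a legacy one; the legacy path varies per system, so "/dev/dma_heap/reserved" below is only an example:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

static int cma_heap_alloc(size_t len)
{
        struct dma_heap_allocation_data data = {
                .len = len,
                .fd_flags = O_RDWR | O_CLOEXEC,
        };
        int heap;

        heap = open("/dev/dma_heap/default_cma_region", O_RDONLY | O_CLOEXEC);
        if (heap < 0)   /* legacy name, present only with DMABUF_HEAPS_CMA_LEGACY */
                heap = open("/dev/dma_heap/reserved", O_RDONLY | O_CLOEXEC);
        if (heap < 0)
                return -1;

        if (ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
                close(heap);
                return -1;
        }

        close(heap);
        return data.fd;         /* dma-buf file descriptor */
}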
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 82b1b714300d..bbe7881f1360 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -33,7 +33,7 @@ struct system_heap_buffer {
 
 struct dma_heap_attachment {
 	struct device *dev;
-	struct sg_table *table;
+	struct sg_table table;
 	struct list_head list;
 	bool mapped;
 };
@@ -52,29 +52,22 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
 static const unsigned int orders[] = {8, 4, 0};
 #define NUM_ORDERS ARRAY_SIZE(orders)
 
-static struct sg_table *dup_sg_table(struct sg_table *table)
+static int dup_sg_table(struct sg_table *from, struct sg_table *to)
 {
-	struct sg_table *new_table;
-	int ret, i;
 	struct scatterlist *sg, *new_sg;
+	int ret, i;
 
-	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
-
-	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
-	if (ret) {
-		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
-	}
+	ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
+	if (ret)
+		return ret;
 
-	new_sg = new_table->sgl;
-	for_each_sgtable_sg(table, sg, i) {
+	new_sg = to->sgl;
+	for_each_sgtable_sg(from, sg, i) {
 		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
 		new_sg = sg_next(new_sg);
 	}
 
-	return new_table;
+	return 0;
 }
 
 static int system_heap_attach(struct dma_buf *dmabuf,
@@ -82,19 +75,18 @@ static int system_heap_attach(struct dma_buf *dmabuf,
 {
 	struct system_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
-	struct sg_table *table;
+	int ret;
 
 	a = kzalloc(sizeof(*a), GFP_KERNEL);
 	if (!a)
 		return -ENOMEM;
 
-	table = dup_sg_table(&buffer->sg_table);
-	if (IS_ERR(table)) {
+	ret = dup_sg_table(&buffer->sg_table, &a->table);
+	if (ret) {
 		kfree(a);
-		return -ENOMEM;
+		return ret;
 	}
 
-	a->table = table;
 	a->dev = attachment->dev;
 	INIT_LIST_HEAD(&a->list);
 	a->mapped = false;
@@ -118,8 +110,7 @@ static void system_heap_detach(struct dma_buf *dmabuf,
 	list_del(&a->list);
 	mutex_unlock(&buffer->lock);
 
-	sg_free_table(a->table);
-	kfree(a->table);
+	sg_free_table(&a->table);
 	kfree(a);
 }
 
@@ -127,7 +118,7 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
 						enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
-	struct sg_table *table = a->table;
+	struct sg_table *table = &a->table;
 	int ret;
 
 	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
@@ -162,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -183,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_device(a->dev, a->table, direction);
+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 4f27ee93a00c..3c20f1d31cf5 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -170,7 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
 {
 	struct sync_timeline *parent = dma_fence_parent(fence);
 
-	return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+	return !__dma_fence_is_later(fence, fence->seqno, parent->value);
 }
 
 static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
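The system_heap.c rework above embeds the sg_table in the per-attachment structure, dropping a separate allocation and the ERR_PTR() conversion. A sketch of the same pattern for a hypothetical exporter (my_* names are illustrative; copying of the scatterlist entries, as dup_sg_table() does, is omitted):

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct my_attachment {
        struct device *dev;
        struct sg_table table;          /* embedded, not a pointer */
};

static int my_attach(struct sg_table *src, struct device *dev,
                     struct my_attachment **out)
{
        struct my_attachment *a = kzalloc(sizeof(*a), GFP_KERNEL);
        int ret;

        if (!a)
                return -ENOMEM;

        ret = sg_alloc_table(&a->table, src->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(a);
                return ret;             /* plain error code, no ERR_PTR() */
        }

        a->dev = dev;
        *out = a;
        return 0;
}

static void my_detach(struct my_attachment *a)
{
        sg_free_table(&a->table);       /* frees the chained sgl, not 'a' */
        kfree(a);
}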
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d9b1c1b2a72b..747e377fb954 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -135,12 +135,18 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
 		strscpy(buf, sync_file->user_name, len);
 	} else {
 		struct dma_fence *fence = sync_file->fence;
+		const char __rcu *timeline;
+		const char __rcu *driver;
 
+		rcu_read_lock();
+		driver = dma_fence_driver_name(fence);
+		timeline = dma_fence_timeline_name(fence);
 		snprintf(buf, len, "%s-%s%llu-%lld",
-			 fence->ops->get_driver_name(fence),
-			 fence->ops->get_timeline_name(fence),
+			 rcu_dereference(driver),
+			 rcu_dereference(timeline),
 			 fence->context, fence->seqno);
+		rcu_read_unlock();
 	}
 
 	return buf;
@@ -262,9 +268,17 @@ err_put_fd:
 static int sync_fill_fence_info(struct dma_fence *fence,
 				struct sync_fence_info *info)
 {
-	strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
+	const char __rcu *timeline;
+	const char __rcu *driver;
+
+	rcu_read_lock();
+
+	driver = dma_fence_driver_name(fence);
+	timeline = dma_fence_timeline_name(fence);
+
+	strscpy(info->obj_name, rcu_dereference(timeline),
 		sizeof(info->obj_name));
-	strscpy(info->driver_name, fence->ops->get_driver_name(fence),
+	strscpy(info->driver_name, rcu_dereference(driver),
 		sizeof(info->driver_name));
 	info->status = dma_fence_get_status(fence);
@@ -273,6 +287,8 @@ static int sync_fill_fence_info(struct dma_fence *fence,
 		ktime_to_ns(dma_fence_timestamp(fence)) :
 		ktime_set(0, 0);
 
+	rcu_read_unlock();
+
 	return info->status;
 }
 
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c9d0c68d2fcb..40399c26e6be 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -109,29 +109,22 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
 {
 	struct udmabuf *ubuf = buf->priv;
-	unsigned long *pfns;
+	struct page **pages;
 	void *vaddr;
 	pgoff_t pg;
 
 	dma_resv_assert_held(buf->resv);
 
-	/**
-	 * HVO may free tail pages, so just use pfn to map each folio
-	 * into vmalloc area.
-	 */
-	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
-	if (!pfns)
+	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
 		return -ENOMEM;
 
-	for (pg = 0; pg < ubuf->pagecount; pg++) {
-		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
-
-		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
-		pfns[pg] = pfn;
-	}
+	for (pg = 0; pg < ubuf->pagecount; pg++)
+		pages[pg] = folio_page(ubuf->folios[pg],
+				       ubuf->offsets[pg] >> PAGE_SHIFT);
 
-	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
-	kvfree(pfns);
+	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+	kvfree(pages);
 	if (!vaddr)
 		return -EINVAL;
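The udmabuf change above maps an array of struct page pointers with vm_map_ram() instead of building a pfn array for vmap_pfn(), which is why the Kconfig hunk at the top no longer selects VMAP_PFN. A minimal sketch of the map/unmap pairing; the vunmap side is not part of this diff, so my_unmap() is an assumption about how such a mapping is torn down:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_map(struct page **pages, unsigned int count)
{
        /* node == -1 lets the allocator pick any NUMA node */
        return vm_map_ram(pages, count, -1);
}

static void my_unmap(void *vaddr, unsigned int count)
{
        vm_unmap_ram(vaddr, count);
}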