From b2a8116e25923643e9613ac5b65dd6e78dc5ee77 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 4 Nov 2019 18:37:59 +0100 Subject: dma_resv: prime lockdep annotations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Full audit of everyone: - i915, radeon, amdgpu should be clean per their maintainers. - vram helpers should be fine, they don't do command submission, so really no business holding struct_mutex while doing copy_*_user. But I haven't checked them all. - panfrost seems to dma_resv_lock only in panfrost_job_push, which looks clean. - v3d holds dma_resv locks in the tail of its v3d_submit_cl_ioctl(), copying from/to userspace happens all in v3d_lookup_bos which is outside of the critical section. - vmwgfx has a bunch of ioctls that do their own copy_*_user: - vmw_execbuf_process: First this does some copies in vmw_execbuf_cmdbuf() and also in the vmw_execbuf_process() itself. Then comes the usual ttm reserve/validate sequence, then actual submission/fencing, then unreserving, and finally some more copy_to_user in vmw_execbuf_copy_fence_user. Glossing over tons of details, but looks all safe. - vmw_fence_event_ioctl: No ttm_reserve/dma_resv_lock anywhere to be seen, seems to only create a fence and copy it out. - a pile of smaller ioctls in vmwgfx_ioctl.c, no reservations to be found there. Summary: vmwgfx seems to be fine too. - virtio: There's virtio_gpu_execbuffer_ioctl, which does all the copying from userspace before even looking up objects through their handles, so safe. Plus the getparam/getcaps ioctls, also both safe. - qxl only has qxl_execbuffer_ioctl, which calls into qxl_process_single_command. There's a lovely comment before the __copy_from_user_inatomic that the slowpath should be copied from i915, but I guess that never happened. Try not to be unlucky and get your CS data evicted between when it's written and the kernel tries to read it. The only other copy_from_user is for relocs, but those are done before qxl_release_reserve_list(), which seems to be the only thing reserving buffers (in the ttm/dma_resv sense) in that code. So looks safe. - A debugfs file in nouveau_debugfs_pstate_set() and the usif ioctl in usif_ioctl() look safe. nouveau_gem_ioctl_pushbuf() otoh breaks this everywhere and needs to be fixed up. v2: Thomas pointed out that vmwgfx calls dma_resv_init while it holds a dma_resv lock of a different object already. Christian mentioned that ttm core does this too for ghost objects. intel-gfx-ci highlighted that i915 has similar issues. Unfortunately we can't do this in the usual module init functions, because kernel threads don't have an ->mm - we have to wait around for some user thread to do this. Solution is to spawn a worker (but only once). It's horrible, but it works. v3: We can allocate mm! (Chris). Horrible worker hack out, clean initcall solution in.
v4: Annotate with __init (Rob Herring) Cc: Rob Herring Cc: Alex Deucher Cc: Christian König Cc: Chris Wilson Cc: Thomas Zimmermann Cc: Rob Herring Cc: Tomeu Vizoso Cc: Eric Anholt Cc: Dave Airlie Cc: Gerd Hoffmann Cc: Ben Skeggs Cc: "VMware Graphics" Cc: Thomas Hellstrom Reviewed-by: Thomas Hellstrom Reviewed-by: Christian König Reviewed-by: Chris Wilson Tested-by: Chris Wilson Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20191104173801.2972-1-daniel.vetter@ffwll.ch --- drivers/dma-buf/dma-resv.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 709002515550..a05ff542be22 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -34,6 +34,7 @@ #include #include +#include /** * DOC: Reservation Object Overview @@ -95,6 +96,29 @@ static void dma_resv_list_free(struct dma_resv_list *list) kfree_rcu(list, rcu); } +#if IS_ENABLED(CONFIG_LOCKDEP) +static void __init dma_resv_lockdep(void) +{ + struct mm_struct *mm = mm_alloc(); + struct dma_resv obj; + + if (!mm) + return; + + dma_resv_init(&obj); + + down_read(&mm->mmap_sem); + ww_mutex_lock(&obj.lock, NULL); + fs_reclaim_acquire(GFP_KERNEL); + fs_reclaim_release(GFP_KERNEL); + ww_mutex_unlock(&obj.lock); + up_read(&mm->mmap_sem); + + mmput(mm); +} +subsys_initcall(dma_resv_lockdep); +#endif + /** * dma_resv_init - initialize a reservation object * @obj: the reservation object -- cgit From ffbbaa7420f7a7ee6d547cd9adc286fe2e3753e0 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 11 Nov 2019 13:11:20 +0000 Subject: dma_resv: prime lockdep annotations From d07ea81611ed6e4fb8cc290f42d23dbcca2da2f8 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 11 Nov 2019 13:07:19 +0000 Subject: [PATCH] dma_resv: Correct return type of dma_resv_lockdep() subsys_initcall() expects a function which returns 'int'. Fix dma_resv_lockdep() so it returns an 'int' error code. Fixes: b2a8116e2592 ("dma_resv: prime lockdep annotations") Signed-off-by: Steven Price Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/c0a0c70d-e6fe-1103-2888-1ce1425f4a5d@arm.com --- drivers/dma-buf/dma-resv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index a05ff542be22..9918a6e5cf91 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -97,13 +97,13 @@ static void dma_resv_list_free(struct dma_resv_list *list) } #if IS_ENABLED(CONFIG_LOCKDEP) -static void __init dma_resv_lockdep(void) +static int __init dma_resv_lockdep(void) { struct mm_struct *mm = mm_alloc(); struct dma_resv obj; if (!mm) - return; + return -ENOMEM; dma_resv_init(&obj); @@ -115,6 +115,8 @@ static void __init dma_resv_lockdep(void) up_read(&mm->mmap_sem); mmput(mm); + + return 0; } subsys_initcall(dma_resv_lockdep); #endif -- cgit From fedf7a441fe89d17907218f6cd53975a9a929837 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Nov 2019 22:08:43 +0100 Subject: dma-resv: Also prime acquire ctx for lockdep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Semantically it really doesn't matter where we grab the ticket. But since the ticket is a fake lockdep lock, it matters for lockdep validation purposes. This means stuff like grabbing a ticket and then doing copy_from/to_user isn't allowed anymore.
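Concretely, the kind of pattern this lets lockdep catch looks as follows (an illustrative sketch only, not code from any of these patches; kbuf, ubuf, size and ret are made-up names). The ticket is primed under mmap_sem, so anything that can fault — and thereby take mmap_sem — while a ticket is held is a reportable inversion:

	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &reservation_ww_class);
	/* BAD: a page fault here takes mmap_sem inside the (fake) ticket lock */
	if (copy_from_user(kbuf, ubuf, size))
		ret = -EFAULT;
	ww_acquire_fini(&ctx);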
This is a change compared to the current ttm fault handler, which doesn't bother with having a full reservation. Since I'm looking into fixing the TODO entry in ttm_mem_evict_wait_busy() I think that'll have to change sooner or later anyway, better get started. A bit more context on why I'm looking into this: For backwards compat with existing i915 gem code I think we'll have to do full slowpath locking in the i915 equivalent of the eviction code. And with dynamic dma-buf that will leak across drivers, so another thing we need to standardize and make sure it's done the same way everywhere. Unfortunately this means another full audit of all drivers: - gem helpers: acquire_init is done right before taking locks, so no problem. Same for acquire_fini and unlocking, which means nothing that's not already covered by the dma_resv_lock rules will be caught with this extension here to the acquire_ctx. - etnaviv: An absolute massive amount of code is run between the acquire_init and the first lock acquisition in submit_lock_objects. But nothing that would touch user memory and could cause a fault. Furthermore nothing that uses the ticket, so even if I missed something, it would be easy to fix by pushing the acquire_init right before the first use. Similar on the unlock/acquire_fini side. - i915: Right now (and this will likely change a lot real soon now) the acquire ctx and actual locks are right next to each other. No problem. - msm has a problem: submit_create calls acquire_init, but then submit_lookup_objects() has a bunch of copy_from_user to do the object lookups. That's the only thing before submit_lock_objects() calls dma_resv_lock(). Despite all the copypasta to etnaviv, etnaviv does not have this issue since it copies all the userspace structs earlier. submit_cleanup does not have any such issues. With the prep patch to pull out the acquire_ctx and reorder it, msm is going to be safe too. - nouveau: acquire_init is right next to ttm_bo_reserve, so all good. Similar on the acquire_fini/ttm_bo_unreserve side. - ttm execbuf utils: acquire context and locking are even in the same functions here (one function to reserve everything, the other to unreserve), so all good. - vc4: Another case where acquire context and locking are handled in the same functions (one function to lock everything, the other to unlock).
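For reference, a minimal sketch of the locking dance that stays legal after this change (hypothetical driver code; lock_both() and the objects a/b are placeholder names, and a production version would loop on repeated -EDEADLK instead of bailing out). All user copies happen before ww_acquire_init(), and nothing between it and ww_acquire_fini() may fault:

	static int lock_both(struct dma_resv *a, struct dma_resv *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &reservation_ww_class);

		ret = dma_resv_lock(a, &ctx);
		if (ret)
			goto out_fini;

		ret = dma_resv_lock(b, &ctx);
		if (ret == -EDEADLK) {
			/* back off, then sleep on the contended lock in the slowpath */
			dma_resv_unlock(a);
			dma_resv_lock_slow(b, &ctx);
			ret = dma_resv_lock(a, &ctx);
			if (ret) {
				dma_resv_unlock(b);
				goto out_fini;
			}
		} else if (ret) {
			dma_resv_unlock(a);
			goto out_fini;
		}

		ww_acquire_done(&ctx);
		/* critical section: no copy_{from,to}_user from here on */
		dma_resv_unlock(a);
		dma_resv_unlock(b);
	out_fini:
		ww_acquire_fini(&ctx);
		return ret;
	}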
Cc: Maarten Lankhorst Cc: Chris Wilson Cc: Christian König Cc: Sumit Semwal Cc: linux-media@vger.kernel.org Cc: linaro-mm-sig@lists.linaro.org Cc: Huang Rui Cc: Eric Anholt Cc: Ben Skeggs Cc: Alex Deucher Cc: Rob Herring Cc: Lucas Stach Cc: Russell King Cc: Christian Gmeiner Cc: Rob Clark Cc: Sean Paul Acked-by: Christian König Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20191119210844.16947-3-daniel.vetter@ffwll.ch --- drivers/dma-buf/dma-resv.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 9918a6e5cf91..4264e64788c4 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -100,7 +100,9 @@ static void dma_resv_list_free(struct dma_resv_list *list) static int __init dma_resv_lockdep(void) { struct mm_struct *mm = mm_alloc(); + struct ww_acquire_ctx ctx; struct dma_resv obj; + int ret; if (!mm) return -ENOMEM; @@ -108,10 +110,14 @@ static int __init dma_resv_lockdep(void) dma_resv_init(&obj); down_read(&mm->mmap_sem); - ww_mutex_lock(&obj.lock, NULL); + ww_acquire_init(&ctx, &reservation_ww_class); + ret = dma_resv_lock(&obj, &ctx); + if (ret == -EDEADLK) + dma_resv_lock_slow(&obj, &ctx); fs_reclaim_acquire(GFP_KERNEL); fs_reclaim_release(GFP_KERNEL); ww_mutex_unlock(&obj.lock); + ww_acquire_fini(&ctx); up_read(&mm->mmap_sem); mmput(mm); -- cgit From 7f0de8d80816d9620e995cf98acf4b6cd2d7c230 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 18 Nov 2019 11:35:30 +0100 Subject: dma-buf: Drop dma_buf_k(un)map It's unused. 10 years ago, back when 32bit was still fairly common and trying to not exhaust vmalloc space sounded like a worthwhile goal, adding these to dma_buf made sense. Reality is that they simply never caught on, and nowadays everyone who needs plenty of buffers will run in 64bit mode anyway. Also update the docs in this area to adjust them to reality. The actual hooks in dma_buf_ops will be removed once all the implementations are gone. Acked-by: Sumit Semwal Signed-off-by: Daniel Vetter Cc: Sumit Semwal Cc: linux-media@vger.kernel.org Cc: linaro-mm-sig@lists.linaro.org Link: https://patchwork.freedesktop.org/patch/msgid/20191118103536.17675-10-daniel.vetter@ffwll.ch --- drivers/dma-buf/dma-buf.c | 63 +++-------------------------------------------- 1 file changed, 3 insertions(+), 60 deletions(-) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index d377b4ca66bf..97988ce1d2dc 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -880,29 +880,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() * access. * - * To support dma_buf objects residing in highmem cpu access is page-based - * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks - * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which - * returns a pointer in kernel virtual address space. Afterwards the chunk - * needs to be unmapped again. There is no limit on how often a given chunk - * can be mapped and unmapped, i.e. the importer does not need to call - * begin_cpu_access again before mapping the same chunk again. 
- * - * Interfaces:: - * void \*dma_buf_kmap(struct dma_buf \*, unsigned long); - * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*); - * - * Implementing the functions is optional for exporters and for importers all - * the restrictions of using kmap apply. - * - * dma_buf kmap calls outside of the range specified in begin_cpu_access are - * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on - * the partial chunks at the beginning and end but may return stale or bogus - * data outside of the range (in these partial chunks). - * - * For some cases the overhead of kmap can be too high, a vmap interface - * is introduced. This interface should be used very carefully, as vmalloc - * space is a limited resources on many architectures. + * Since most kernel internal dma-buf accesses need the entire buffer, a + * vmap interface is introduced. Note that on very old 32-bit architectures + * vmalloc space might be limited and result in vmap calls failing. * * Interfaces:: * void \*dma_buf_vmap(struct dma_buf \*dmabuf) @@ -1052,43 +1032,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); -/** - * dma_buf_kmap - Map a page of the buffer object into kernel address space. The - * same restrictions as for kmap and friends apply. - * @dmabuf: [in] buffer to map page from. - * @page_num: [in] page in PAGE_SIZE units to map. - * - * This call must always succeed, any necessary preparations that might fail - * need to be done in begin_cpu_access. - */ -void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) -{ - WARN_ON(!dmabuf); - - if (!dmabuf->ops->map) - return NULL; - return dmabuf->ops->map(dmabuf, page_num); -} -EXPORT_SYMBOL_GPL(dma_buf_kmap); - -/** - * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap. - * @dmabuf: [in] buffer to unmap page from. - * @page_num: [in] page in PAGE_SIZE units to unmap. - * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap. - * - * This call must always succeed. - */ -void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, - void *vaddr) -{ - WARN_ON(!dmabuf); - - if (dmabuf->ops->unmap) - dmabuf->ops->unmap(dmabuf, page_num, vaddr); -} -EXPORT_SYMBOL_GPL(dma_buf_kunmap); - /** * dma_buf_mmap - Setup up a userspace mmap with the given vma -- cgit From 19d32ace8b6acebc45da1ea748000ac79ccc7721 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 26 Nov 2019 15:25:16 +0100 Subject: udmabuf: Remove deleted map/unmap handlers. Commit 7f0de8d80816 ("dma-buf: Drop dma_buf_k(un)map") removed map/unmap handlers, but they still existed in udmabuf.
Remove them there as well. Signed-off-by: Maarten Lankhorst Fixes: 7f0de8d80816 ("dma-buf: Drop dma_buf_k(un)map") Cc: Sumit Semwal Cc: Daniel Vetter Cc: linux-media@vger.kernel.org Cc: linaro-mm-sig@lists.linaro.org Cc: dri-devel@lists.freedesktop.org Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20191126142516.630200-1-maarten.lankhorst@linux.intel.com --- drivers/dma-buf/udmabuf.c | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 9635897458a0..9de539c1def4 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -93,26 +93,10 @@ static void release_udmabuf(struct dma_buf *buf) kfree(ubuf); } -static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num) -{ - struct udmabuf *ubuf = buf->priv; - struct page *page = ubuf->pages[page_num]; - - return kmap(page); -} - -static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num, - void *vaddr) -{ - kunmap(vaddr); -} - static const struct dma_buf_ops udmabuf_ops = { .map_dma_buf = map_udmabuf, .unmap_dma_buf = unmap_udmabuf, .release = release_udmabuf, - .map = kmap_udmabuf, - .unmap = kunmap_udmabuf, .mmap = mmap_udmabuf, }; -- cgit From bc7a71da43b48333f84c6534ab43d240e34cf9eb Mon Sep 17 00:00:00 2001 From: Gurchetan Singh Date: Mon, 2 Dec 2019 17:36:24 -0800 Subject: udmabuf: use cache_sgt_mapping option The GEM prime helpers do it, so should we. It's also possible to make it optional later. Signed-off-by: Gurchetan Singh Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-1-gurchetansingh@chromium.org Signed-off-by: Gerd Hoffmann --- drivers/dma-buf/udmabuf.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 9de539c1def4..be15eb6b0586 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -94,10 +94,11 @@ static void release_udmabuf(struct dma_buf *buf) } static const struct dma_buf_ops udmabuf_ops = { - .map_dma_buf = map_udmabuf, - .unmap_dma_buf = unmap_udmabuf, - .release = release_udmabuf, - .mmap = mmap_udmabuf, + .cache_sgt_mapping = true, + .map_dma_buf = map_udmabuf, + .unmap_dma_buf = unmap_udmabuf, + .release = release_udmabuf, + .mmap = mmap_udmabuf, }; #define SEALS_WANTED (F_SEAL_SHRINK) -- cgit From c1bbed668997268c9edccdc9db1bd1487d9e20b0 Mon Sep 17 00:00:00 2001 From: Gurchetan Singh Date: Mon, 2 Dec 2019 17:36:25 -0800 Subject: udmabuf: add a pointer to the miscdevice in dma-buf private data Will be used later.
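For orientation, the later use (it lands in the begin_cpu_access/end_cpu_access patch further down): the stored miscdevice supplies a struct device for the DMA API:

	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;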
v2: rename 'udmabuf_misc' to 'device' (kraxel) Signed-off-by: Gurchetan Singh Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-2-gurchetansingh@chromium.org Signed-off-by: Gerd Hoffmann --- drivers/dma-buf/udmabuf.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index be15eb6b0586..f0bf3ba7441e 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */ struct udmabuf { pgoff_t pagecount; struct page **pages; + struct miscdevice *device; }; static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) @@ -104,8 +105,9 @@ static const struct dma_buf_ops udmabuf_ops = { #define SEALS_WANTED (F_SEAL_SHRINK) #define SEALS_DENIED (F_SEAL_WRITE) -static long udmabuf_create(const struct udmabuf_create_list *head, - const struct udmabuf_create_item *list) +static long udmabuf_create(struct miscdevice *device, + struct udmabuf_create_list *head, + struct udmabuf_create_item *list) { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct file *memfd = NULL; @@ -172,6 +174,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head, exp_info.priv = ubuf; exp_info.flags = O_RDWR; + ubuf->device = device; buf = dma_buf_export(&exp_info); if (IS_ERR(buf)) { ret = PTR_ERR(buf); @@ -209,7 +212,7 @@ static long udmabuf_ioctl_create(struct file *filp, unsigned long arg) list.offset = create.offset; list.size = create.size; - return udmabuf_create(&head, &list); + return udmabuf_create(filp->private_data, &head, &list); } static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg) @@ -228,7 +231,7 @@ static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg) if (IS_ERR(list)) return PTR_ERR(list); - ret = udmabuf_create(&head, list); + ret = udmabuf_create(filp->private_data, &head, list); kfree(list); return ret; } -- cgit From 17a7ce203490459cff14fb1c8f9a15d65fd1c544 Mon Sep 17 00:00:00 2001 From: Gurchetan Singh Date: Mon, 2 Dec 2019 17:36:26 -0800 Subject: udmabuf: separate out creating/destroying scatter-table These are nice functions and can be re-used. 
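To show the intended reuse (a hypothetical extra caller, not part of this patch — the real reuse lands in the begin_cpu_access patch that follows): after the split below, any code path that has a struct device at hand can map and unmap the buffer through the new helpers:

	static int example_dma_rw(struct device *dev, struct dma_buf *buf)
	{
		struct sg_table *sg;

		sg = get_sg_table(dev, buf, DMA_BIDIRECTIONAL);
		if (IS_ERR(sg))
			return PTR_ERR(sg);

		/* ... let the device DMA to/from the mapped scatterlist ... */

		put_sg_table(dev, sg, DMA_BIDIRECTIONAL);
		return 0;
	}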
Signed-off-by: Gurchetan Singh Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-3-gurchetansingh@chromium.org Signed-off-by: Gerd Hoffmann --- drivers/dma-buf/udmabuf.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index f0bf3ba7441e..0a610e09ae23 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -47,10 +47,10 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma) return 0; } -static struct sg_table *map_udmabuf(struct dma_buf_attachment *at, - enum dma_data_direction direction) +static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf, + enum dma_data_direction direction) { - struct udmabuf *ubuf = at->dmabuf->priv; + struct udmabuf *ubuf = buf->priv; struct sg_table *sg; int ret; @@ -62,7 +62,7 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at, GFP_KERNEL); if (ret < 0) goto err; - if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) { + if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) { ret = -EINVAL; goto err; } @@ -74,13 +74,25 @@ err: return ERR_PTR(ret); } +static void put_sg_table(struct device *dev, struct sg_table *sg, + enum dma_data_direction direction) +{ + dma_unmap_sg(dev, sg->sgl, sg->nents, direction); + sg_free_table(sg); + kfree(sg); +} + +static struct sg_table *map_udmabuf(struct dma_buf_attachment *at, + enum dma_data_direction direction) +{ + return get_sg_table(at->dev, at->dmabuf, direction); +} + static void unmap_udmabuf(struct dma_buf_attachment *at, struct sg_table *sg, enum dma_data_direction direction) { - dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction); - sg_free_table(sg); - kfree(sg); + return put_sg_table(at->dev, sg, direction); } static void release_udmabuf(struct dma_buf *buf) -- cgit From 284562e1f34874e267d4f499362c3816f8f6bc3f Mon Sep 17 00:00:00 2001 From: Gurchetan Singh Date: Mon, 2 Dec 2019 17:36:27 -0800 Subject: udmabuf: implement begin_cpu_access/end_cpu_access hooks With the misc device, we should end up using the result of get_arch_dma_ops(..) or dma-direct ops. This can allow us to have WC mappings in the guest after synchronization. 
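For context, these hooks are what userspace reaches through the existing DMA_BUF_IOCTL_SYNC ioctl; an illustrative userspace bracket around CPU access (dmabuf_fd is a placeholder for a dma-buf fd exported by udmabuf):

	struct dma_buf_sync sync = { 0 };

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* -> begin_cpu_udmabuf() */

	/* ... CPU reads/writes through the mmap()ed pages ... */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* -> end_cpu_udmabuf() */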
Signed-off-by: Gurchetan Singh Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-4-gurchetansingh@chromium.org Signed-off-by: Gerd Hoffmann --- drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 0a610e09ae23..61b0a2cff874 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */ struct udmabuf { pgoff_t pagecount; struct page **pages; + struct sg_table *sg; struct miscdevice *device; }; @@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at, static void release_udmabuf(struct dma_buf *buf) { struct udmabuf *ubuf = buf->priv; + struct device *dev = ubuf->device->this_device; pgoff_t pg; + if (ubuf->sg) + put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL); + for (pg = 0; pg < ubuf->pagecount; pg++) put_page(ubuf->pages[pg]); kfree(ubuf->pages); kfree(ubuf); } +static int begin_cpu_udmabuf(struct dma_buf *buf, + enum dma_data_direction direction) +{ + struct udmabuf *ubuf = buf->priv; + struct device *dev = ubuf->device->this_device; + + if (!ubuf->sg) { + ubuf->sg = get_sg_table(dev, buf, direction); + if (IS_ERR(ubuf->sg)) + return PTR_ERR(ubuf->sg); + } else { + dma_sync_sg_for_device(dev, ubuf->sg->sgl, + ubuf->sg->nents, + direction); + } + + return 0; +} + +static int end_cpu_udmabuf(struct dma_buf *buf, + enum dma_data_direction direction) +{ + struct udmabuf *ubuf = buf->priv; + struct device *dev = ubuf->device->this_device; + + if (!ubuf->sg) + return -EINVAL; + + dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction); + return 0; +} + static const struct dma_buf_ops udmabuf_ops = { .cache_sgt_mapping = true, .map_dma_buf = map_udmabuf, .unmap_dma_buf = unmap_udmabuf, .release = release_udmabuf, .mmap = mmap_udmabuf, + .begin_cpu_access = begin_cpu_udmabuf, + .end_cpu_access = end_cpu_udmabuf, }; #define SEALS_WANTED (F_SEAL_SHRINK) -- cgit From c02a81fba74fe3488ad6b08bfb5a1329005418f8 Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Tue, 3 Dec 2019 17:26:37 +0000 Subject: dma-buf: Add dma-buf heaps framework This framework allows a unified userspace interface for dma-buf exporters, allowing userland to allocate specific types of memory for use in dma-buf sharing. Each heap is given its own device node, which a user can allocate a dma-buf fd from using the DMA_HEAP_IOC_ALLOC ioctl. This code is an evolution of the Android ION implementation, and a big thanks is due to its authors/maintainers over time for their effort: Rebecca Schultz Zavin, Colin Cross, Benjamin Gaignard, Laura Abbott, and many other contributors! Cc: Laura Abbott Cc: Benjamin Gaignard Cc: Sumit Semwal Cc: Liam Mark Cc: Pratik Patel Cc: Brian Starkey Cc: Vincent Donnefort Cc: Sudipto Paul Cc: Andrew F. Davis Cc: Christoph Hellwig Cc: Chenbo Feng Cc: Alistair Strachan Cc: Hridya Valsaraju Cc: Sandeep Patil Cc: Hillf Danton Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Reviewed-by: Brian Starkey Acked-by: Sandeep Patil Signed-off-by: Andrew F.
Davis Signed-off-by: John Stultz Signed-off-by: Sumit Semwal Link: https://patchwork.freedesktop.org/patch/msgid/20191203172641.66642-2-john.stultz@linaro.org --- drivers/dma-buf/Kconfig | 9 ++ drivers/dma-buf/Makefile | 1 + drivers/dma-buf/dma-heap.c | 297 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 307 insertions(+) create mode 100644 drivers/dma-buf/dma-heap.c (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index a23b6752d11a..bffa58fc3e6e 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -44,4 +44,13 @@ config DMABUF_SELFTESTS default n depends on DMA_SHARED_BUFFER +menuconfig DMABUF_HEAPS + bool "DMA-BUF Userland Memory Heaps" + select DMA_SHARED_BUFFER + help + Choose this option to enable the DMA-BUF userland memory heaps. + This option creates per heap chardevs in /dev/dma_heap/ which + allows userspace to allocate dma-bufs that can be shared + between drivers. + endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 03479da06422..caee5eb3d351 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ dma-resv.o seqno-fence.o +obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c new file mode 100644 index 000000000000..4f04d104ae61 --- /dev/null +++ b/drivers/dma-buf/dma-heap.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Framework for userspace DMA-BUF allocations + * + * Copyright (C) 2011 Google, Inc. + * Copyright (C) 2019 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEVNAME "dma_heap" + +#define NUM_HEAP_MINORS 128 + +/** + * struct dma_heap - represents a dmabuf heap in the system + * @name: used for debugging/device-node name + * @ops: ops struct for this heap + * @heap_devt heap device node + * @list list head connecting to list of heaps + * @heap_cdev heap char device + * + * Represents a heap of memory from which buffers can be made. + */ +struct dma_heap { + const char *name; + const struct dma_heap_ops *ops; + void *priv; + dev_t heap_devt; + struct list_head list; + struct cdev heap_cdev; +}; + +static LIST_HEAD(heap_list); +static DEFINE_MUTEX(heap_list_lock); +static dev_t dma_heap_devt; +static struct class *dma_heap_class; +static DEFINE_XARRAY_ALLOC(dma_heap_minors); + +static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, + unsigned int fd_flags, + unsigned int heap_flags) +{ + /* + * Allocations from all heaps have to begin + * and end on page boundaries.
+ */ + len = PAGE_ALIGN(len); + if (!len) + return -EINVAL; + + return heap->ops->allocate(heap, len, fd_flags, heap_flags); +} + +static int dma_heap_open(struct inode *inode, struct file *file) +{ + struct dma_heap *heap; + + heap = xa_load(&dma_heap_minors, iminor(inode)); + if (!heap) { + pr_err("dma_heap: minor %d unknown.\n", iminor(inode)); + return -ENODEV; + } + + /* instance data as context */ + file->private_data = heap; + nonseekable_open(inode, file); + + return 0; +} + +static long dma_heap_ioctl_allocate(struct file *file, void *data) +{ + struct dma_heap_allocation_data *heap_allocation = data; + struct dma_heap *heap = file->private_data; + int fd; + + if (heap_allocation->fd) + return -EINVAL; + + if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) + return -EINVAL; + + if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) + return -EINVAL; + + fd = dma_heap_buffer_alloc(heap, heap_allocation->len, + heap_allocation->fd_flags, + heap_allocation->heap_flags); + if (fd < 0) + return fd; + + heap_allocation->fd = fd; + + return 0; +} + +unsigned int dma_heap_ioctl_cmds[] = { + DMA_HEAP_IOC_ALLOC, +}; + +static long dma_heap_ioctl(struct file *file, unsigned int ucmd, + unsigned long arg) +{ + char stack_kdata[128]; + char *kdata = stack_kdata; + unsigned int kcmd; + unsigned int in_size, out_size, drv_size, ksize; + int nr = _IOC_NR(ucmd); + int ret = 0; + + if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) + return -EINVAL; + + /* Get the kernel ioctl cmd that matches */ + kcmd = dma_heap_ioctl_cmds[nr]; + + /* Figure out the delta between user cmd size and kernel cmd size */ + drv_size = _IOC_SIZE(kcmd); + out_size = _IOC_SIZE(ucmd); + in_size = out_size; + if ((ucmd & kcmd & IOC_IN) == 0) + in_size = 0; + if ((ucmd & kcmd & IOC_OUT) == 0) + out_size = 0; + ksize = max(max(in_size, out_size), drv_size); + + /* If necessary, allocate buffer for ioctl argument */ + if (ksize > sizeof(stack_kdata)) { + kdata = kmalloc(ksize, GFP_KERNEL); + if (!kdata) + return -ENOMEM; + } + + if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) { + ret = -EFAULT; + goto err; + } + + /* zero out any difference between the kernel/user structure size */ + if (ksize > in_size) + memset(kdata + in_size, 0, ksize - in_size); + + switch (kcmd) { + case DMA_HEAP_IOC_ALLOC: + ret = dma_heap_ioctl_allocate(file, kdata); + break; + default: + return -ENOTTY; + } + + if (copy_to_user((void __user *)arg, kdata, out_size) != 0) + ret = -EFAULT; +err: + if (kdata != stack_kdata) + kfree(kdata); + return ret; +} + +static const struct file_operations dma_heap_fops = { + .owner = THIS_MODULE, + .open = dma_heap_open, + .unlocked_ioctl = dma_heap_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = dma_heap_ioctl, +#endif +}; + +/** + * dma_heap_get_drvdata() - get per-subdriver data for the heap + * @heap: DMA-Heap to retrieve private data for + * + * Returns: + * The per-subdriver data for the heap. 
+ */ +void *dma_heap_get_drvdata(struct dma_heap *heap) +{ + return heap->priv; +} + +struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) +{ + struct dma_heap *heap, *h, *err_ret; + struct device *dev_ret; + unsigned int minor; + int ret; + + if (!exp_info->name || !strcmp(exp_info->name, "")) { + pr_err("dma_heap: Cannot add heap without a name\n"); + return ERR_PTR(-EINVAL); + } + + if (!exp_info->ops || !exp_info->ops->allocate) { + pr_err("dma_heap: Cannot add heap with invalid ops struct\n"); + return ERR_PTR(-EINVAL); + } + + /* check the name is unique */ + mutex_lock(&heap_list_lock); + list_for_each_entry(h, &heap_list, list) { + if (!strcmp(h->name, exp_info->name)) { + mutex_unlock(&heap_list_lock); + pr_err("dma_heap: Already registered heap named %s\n", + exp_info->name); + return ERR_PTR(-EINVAL); + } + } + mutex_unlock(&heap_list_lock); + + heap = kzalloc(sizeof(*heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + + heap->name = exp_info->name; + heap->ops = exp_info->ops; + heap->priv = exp_info->priv; + + /* Find unused minor number */ + ret = xa_alloc(&dma_heap_minors, &minor, heap, + XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL); + if (ret < 0) { + pr_err("dma_heap: Unable to get minor number for heap\n"); + err_ret = ERR_PTR(ret); + goto err0; + } + + /* Create device */ + heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor); + + cdev_init(&heap->heap_cdev, &dma_heap_fops); + ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1); + if (ret < 0) { + pr_err("dma_heap: Unable to add char device\n"); + err_ret = ERR_PTR(ret); + goto err1; + } + + dev_ret = device_create(dma_heap_class, + NULL, + heap->heap_devt, + NULL, + heap->name); + if (IS_ERR(dev_ret)) { + pr_err("dma_heap: Unable to create device\n"); + err_ret = ERR_CAST(dev_ret); + goto err2; + } + /* Add heap to the list */ + mutex_lock(&heap_list_lock); + list_add(&heap->list, &heap_list); + mutex_unlock(&heap_list_lock); + + return heap; + +err2: + cdev_del(&heap->heap_cdev); +err1: + xa_erase(&dma_heap_minors, minor); +err0: + kfree(heap); + return err_ret; +} + +static char *dma_heap_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev)); +} + +static int dma_heap_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); + if (ret) + return ret; + + dma_heap_class = class_create(THIS_MODULE, DEVNAME); + if (IS_ERR(dma_heap_class)) { + unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); + return PTR_ERR(dma_heap_class); + } + dma_heap_class->devnode = dma_heap_devnode; + + return 0; +} +subsys_initcall(dma_heap_init); -- cgit From 5248eb12fea890a03b4cdc3ef546d6319d4d9b73 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 3 Dec 2019 17:26:38 +0000 Subject: dma-buf: heaps: Add heap helpers Add generic helper dmabuf ops for dma heaps, so we can reduce the amount of duplicative code for the exported dmabufs. This code is an evolution of the Android ION implementation, so thanks to its original authors and maintainers: Rebecca Schultz Zavin, Colin Cross, Laura Abbott, and others! Cc: Laura Abbott Cc: Benjamin Gaignard Cc: Sumit Semwal Cc: Liam Mark Cc: Pratik Patel Cc: Brian Starkey Cc: Vincent Donnefort Cc: Sudipto Paul Cc: Andrew F.
Davis Cc: Christoph Hellwig Cc: Chenbo Feng Cc: Alistair Strachan Cc: Hridya Valsaraju Cc: Sandeep Patil Cc: Hillf Danton Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Reviewed-by: Benjamin Gaignard Reviewed-by: Brian Starkey Acked-by: Sandeep Patil Acked-by: Laura Abbott Tested-by: Ayan Kumar Halder Signed-off-by: John Stultz Signed-off-by: Sumit Semwal Link: https://patchwork.freedesktop.org/patch/msgid/20191203172641.66642-3-john.stultz@linaro.org --- drivers/dma-buf/Makefile | 1 + drivers/dma-buf/heaps/Makefile | 2 + drivers/dma-buf/heaps/heap-helpers.c | 271 +++++++++++++++++++++++++++++++++++ drivers/dma-buf/heaps/heap-helpers.h | 53 +++++++ 4 files changed, 327 insertions(+) create mode 100644 drivers/dma-buf/heaps/Makefile create mode 100644 drivers/dma-buf/heaps/heap-helpers.c create mode 100644 drivers/dma-buf/heaps/heap-helpers.h (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index caee5eb3d351..9c190026bfab 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -2,6 +2,7 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ dma-resv.o seqno-fence.o obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o +obj-$(CONFIG_DMABUF_HEAPS) += heaps/ obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile new file mode 100644 index 000000000000..de49898112db --- /dev/null +++ b/drivers/dma-buf/heaps/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += heap-helpers.o diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c new file mode 100644 index 000000000000..9f964ca3f59c --- /dev/null +++ b/drivers/dma-buf/heaps/heap-helpers.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "heap-helpers.h" + +void init_heap_helper_buffer(struct heap_helper_buffer *buffer, + void (*free)(struct heap_helper_buffer *)) +{ + buffer->priv_virt = NULL; + mutex_init(&buffer->lock); + buffer->vmap_cnt = 0; + buffer->vaddr = NULL; + buffer->pagecount = 0; + buffer->pages = NULL; + INIT_LIST_HEAD(&buffer->attachments); + buffer->free = free; +} + +struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, + int fd_flags) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &heap_helper_ops; + exp_info.size = buffer->size; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + + return dma_buf_export(&exp_info); +} + +static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer) +{ + void *vaddr; + + vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer) +{ + if (buffer->vmap_cnt > 0) { + WARN(1, "%s: buffer still mapped in the kernel\n", __func__); + vunmap(buffer->vaddr); + } + + buffer->free(buffer); +} + +static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer) +{ + void *vaddr; + + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + return buffer->vaddr; + } + vaddr = dma_heap_map_kernel(buffer); + if (IS_ERR(vaddr)) + return vaddr; + buffer->vaddr = vaddr; + buffer->vmap_cnt++; + return vaddr; +} + +static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer) +{ + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + 
buffer->vaddr = NULL; + } +} + +struct dma_heaps_attachment { + struct device *dev; + struct sg_table table; + struct list_head list; +}; + +static int dma_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct dma_heaps_attachment *a; + struct heap_helper_buffer *buffer = dmabuf->priv; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(&a->table, buffer->pages, + buffer->pagecount, 0, + buffer->pagecount << PAGE_SHIFT, + GFP_KERNEL); + if (ret) { + kfree(a); + return ret; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void dma_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct dma_heaps_attachment *a = attachment->priv; + struct heap_helper_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(&a->table); + kfree(a); +} + +static +struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heaps_attachment *a = attachment->priv; + struct sg_table *table; + + table = &a->table; + + if (!dma_map_sg(attachment->dev, table->sgl, table->nents, + direction)) + table = ERR_PTR(-ENOMEM); + return table; +} + +static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction); +} + +static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct heap_helper_buffer *buffer = vma->vm_private_data; + + if (vmf->pgoff > buffer->pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = buffer->pages[vmf->pgoff]; + get_page(vmf->page); + + return 0; +} + +static const struct vm_operations_struct dma_heap_vm_ops = { + .fault = dma_heap_vm_fault, +}; + +static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct heap_helper_buffer *buffer = dmabuf->priv; + + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) + return -EINVAL; + + vma->vm_ops = &dma_heap_vm_ops; + vma->vm_private_data = buffer; + + return 0; +} + +static void dma_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct heap_helper_buffer *buffer = dmabuf->priv; + + dma_heap_buffer_destroy(buffer); +} + +static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct heap_helper_buffer *buffer = dmabuf->priv; + struct dma_heaps_attachment *a; + int ret = 0; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->size); + + list_for_each_entry(a, &buffer->attachments, list) { + dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents, + direction); + } + mutex_unlock(&buffer->lock); + + return ret; +} + +static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct heap_helper_buffer *buffer = dmabuf->priv; + struct dma_heaps_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->size); + + list_for_each_entry(a, &buffer->attachments, list) { + dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents, + direction); + } + mutex_unlock(&buffer->lock); + 
+ return 0; +} + +static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf) +{ + struct heap_helper_buffer *buffer = dmabuf->priv; + void *vaddr; + + mutex_lock(&buffer->lock); + vaddr = dma_heap_buffer_vmap_get(buffer); + mutex_unlock(&buffer->lock); + + return vaddr; +} + +static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct heap_helper_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + dma_heap_buffer_vmap_put(buffer); + mutex_unlock(&buffer->lock); +} + +const struct dma_buf_ops heap_helper_ops = { + .map_dma_buf = dma_heap_map_dma_buf, + .unmap_dma_buf = dma_heap_unmap_dma_buf, + .mmap = dma_heap_mmap, + .release = dma_heap_dma_buf_release, + .attach = dma_heap_attach, + .detach = dma_heap_detach, + .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access, + .end_cpu_access = dma_heap_dma_buf_end_cpu_access, + .vmap = dma_heap_dma_buf_vmap, + .vunmap = dma_heap_dma_buf_vunmap, +}; diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h new file mode 100644 index 000000000000..805d2df88024 --- /dev/null +++ b/drivers/dma-buf/heaps/heap-helpers.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DMABUF Heaps helper code + * + * Copyright (C) 2011 Google, Inc. + * Copyright (C) 2019 Linaro Ltd. + */ + +#ifndef _HEAP_HELPERS_H +#define _HEAP_HELPERS_H + +#include +#include + +/** + * struct heap_helper_buffer - helper buffer metadata + * @heap: back pointer to the heap the buffer came from + * @dmabuf: backing dma-buf for this buffer + * @size: size of the buffer + * @priv_virt pointer to heap specific private value + * @lock mutex to protect the data in this structure + * @vmap_cnt count of vmap references on the buffer + * @vaddr vmap'ed virtual address + * @pagecount number of pages in the buffer + * @pages list of page pointers + * @attachments list of device attachments + * + * @free heap callback to free the buffer + */ +struct heap_helper_buffer { + struct dma_heap *heap; + struct dma_buf *dmabuf; + size_t size; + + void *priv_virt; + struct mutex lock; + int vmap_cnt; + void *vaddr; + pgoff_t pagecount; + struct page **pages; + struct list_head attachments; + + void (*free)(struct heap_helper_buffer *buffer); +}; + +void init_heap_helper_buffer(struct heap_helper_buffer *buffer, + void (*free)(struct heap_helper_buffer *)); + +struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, + int fd_flags); + +extern const struct dma_buf_ops heap_helper_ops; +#endif /* _HEAP_HELPERS_H */ -- cgit From efa04fefebbd724ffda7f49e42d057a7217c45b0 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 3 Dec 2019 17:26:39 +0000 Subject: dma-buf: heaps: Add system heap to dmabuf heaps This patch adds system heap to the dma-buf heaps framework. This allows applications to get a page-allocator backed dma-buf for non-contiguous memory. This code is an evolution of the Android ION implementation, so thanks to its original authors and maintainers: Rebecca Schultz Zavin, Colin Cross, Laura Abbott, and others! Cc: Laura Abbott Cc: Benjamin Gaignard Cc: Sumit Semwal Cc: Liam Mark Cc: Pratik Patel Cc: Brian Starkey Cc: Vincent Donnefort Cc: Sudipto Paul Cc: Andrew F.
Davis Cc: Christoph Hellwig Cc: Chenbo Feng Cc: Alistair Strachan Cc: Hridya Valsaraju Cc: Sandeep Patil Cc: Hillf Danton Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Reviewed-by: Benjamin Gaignard Reviewed-by: Brian Starkey Acked-by: Sandeep Patil Acked-by: Laura Abbott Tested-by: Ayan Kumar Halder Signed-off-by: John Stultz Signed-off-by: Sumit Semwal Link: https://patchwork.freedesktop.org/patch/msgid/20191203172641.66642-4-john.stultz@linaro.org --- drivers/dma-buf/Kconfig | 2 + drivers/dma-buf/heaps/Kconfig | 6 ++ drivers/dma-buf/heaps/Makefile | 1 + drivers/dma-buf/heaps/system_heap.c | 123 ++++++++++++++++++++++++++++++++++++ 4 files changed, 132 insertions(+) create mode 100644 drivers/dma-buf/heaps/Kconfig create mode 100644 drivers/dma-buf/heaps/system_heap.c (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index bffa58fc3e6e..0613bb7770f5 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -53,4 +53,6 @@ menuconfig DMABUF_HEAPS allows userspace to allocate dma-bufs that can be shared between drivers. +source "drivers/dma-buf/heaps/Kconfig" + endmenu diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig new file mode 100644 index 000000000000..205052744169 --- /dev/null +++ b/drivers/dma-buf/heaps/Kconfig @@ -0,0 +1,6 @@ +config DMABUF_HEAPS_SYSTEM + bool "DMA-BUF System Heap" + depends on DMABUF_HEAPS + help + Choose this option to enable the system dmabuf heap. The system heap + is backed by pages from the buddy allocator. If in doubt, say Y. diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index de49898112db..d1808eca2581 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += heap-helpers.o +obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c new file mode 100644 index 000000000000..1aa01e98c595 --- /dev/null +++ b/drivers/dma-buf/heaps/system_heap.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMABUF System heap exporter + * + * Copyright (C) 2011 Google, Inc. + * Copyright (C) 2019 Linaro Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "heap-helpers.h" + +struct dma_heap *sys_heap; + +static void system_heap_free(struct heap_helper_buffer *buffer) +{ + pgoff_t pg; + + for (pg = 0; pg < buffer->pagecount; pg++) + __free_page(buffer->pages[pg]); + kfree(buffer->pages); + kfree(buffer); +} + +static int system_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + struct heap_helper_buffer *helper_buffer; + struct dma_buf *dmabuf; + int ret = -ENOMEM; + pgoff_t pg; + + helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); + if (!helper_buffer) + return -ENOMEM; + + init_heap_helper_buffer(helper_buffer, system_heap_free); + helper_buffer->heap = heap; + helper_buffer->size = len; + + helper_buffer->pagecount = len / PAGE_SIZE; + helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, + sizeof(*helper_buffer->pages), + GFP_KERNEL); + if (!helper_buffer->pages) { + ret = -ENOMEM; + goto err0; + } + + for (pg = 0; pg < helper_buffer->pagecount; pg++) { + /* + * Avoid trying to allocate memory if the process + * has been killed by SIGKILL + */ + if (fatal_signal_pending(current)) + goto err1; + + helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!helper_buffer->pages[pg]) + goto err1; + } + + /* create the dmabuf */ + dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + if (IS_ERR(dmabuf)) { + ret = PTR_ERR(dmabuf); + goto err1; + } + + helper_buffer->dmabuf = dmabuf; + + ret = dma_buf_fd(dmabuf, fd_flags); + if (ret < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + return ret; + } + + return ret; + +err1: + while (pg > 0) + __free_page(helper_buffer->pages[--pg]); + kfree(helper_buffer->pages); +err0: + kfree(helper_buffer); + + return ret; +} + +static const struct dma_heap_ops system_heap_ops = { + .allocate = system_heap_allocate, +}; + +static int system_heap_create(void) +{ + struct dma_heap_export_info exp_info; + int ret = 0; + + exp_info.name = "system_heap"; + exp_info.ops = &system_heap_ops; + exp_info.priv = NULL; + + sys_heap = dma_heap_add(&exp_info); + if (IS_ERR(sys_heap)) + ret = PTR_ERR(sys_heap); + + return ret; +} +module_init(system_heap_create); +MODULE_LICENSE("GPL v2"); -- cgit From b61614ec318aae0c77ecd2816878d851dd61d9a6 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 3 Dec 2019 17:26:40 +0000 Subject: dma-buf: heaps: Add CMA heap to dmabuf heaps This adds a CMA heap, which allows userspace to allocate a dma-buf of contiguous memory out of a CMA region. This code is an evolution of the Android ION implementation, so thanks to its original author and maintainers: Benjamin Gaignard, Laura Abbott, and others! NOTE: This patch only adds the default CMA heap. We will enable selectively adding other CMA memory regions to the dmabuf heaps interface with a later patch (which requires a dt binding) Cc: Laura Abbott Cc: Benjamin Gaignard Cc: Sumit Semwal Cc: Liam Mark Cc: Pratik Patel Cc: Brian Starkey Cc: Vincent Donnefort Cc: Sudipto Paul Cc: Andrew F.
Davis Cc: Christoph Hellwig Cc: Chenbo Feng Cc: Alistair Strachan Cc: Hridya Valsaraju Cc: Sandeep Patil Cc: Hillf Danton Cc: Dave Airlie Cc: dri-devel@lists.freedesktop.org Reviewed-by: Benjamin Gaignard Reviewed-by: Brian Starkey Acked-by: Sandeep Patil Acked-by: Laura Abbott Tested-by: Ayan Kumar Halder Signed-off-by: John Stultz Signed-off-by: Sumit Semwal Link: https://patchwork.freedesktop.org/patch/msgid/20191203172641.66642-5-john.stultz@linaro.org --- drivers/dma-buf/heaps/Kconfig | 8 ++ drivers/dma-buf/heaps/Makefile | 1 + drivers/dma-buf/heaps/cma_heap.c | 177 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 186 insertions(+) create mode 100644 drivers/dma-buf/heaps/cma_heap.c (limited to 'drivers/dma-buf') diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index 205052744169..a5eef06c4226 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM help Choose this option to enable the system dmabuf heap. The system heap is backed by pages from the buddy allocator. If in doubt, say Y. + +config DMABUF_HEAPS_CMA + bool "DMA-BUF CMA Heap" + depends on DMABUF_HEAPS && DMA_CMA + help + Choose this option to enable dma-buf CMA heap. This heap is backed + by the Contiguous Memory Allocator (CMA). If your system has these + regions, you should say Y here. diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index d1808eca2581..6e54cdec3da0 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += heap-helpers.o obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o +obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c new file mode 100644 index 000000000000..626cf7fd033a --- /dev/null +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMABUF CMA heap exporter + * + * Copyright (C) 2012, 2019 Linaro Ltd. + * Author: for ST-Ericsson. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "heap-helpers.h" + +struct cma_heap { + struct dma_heap *heap; + struct cma *cma; +}; + +static void cma_heap_free(struct heap_helper_buffer *buffer) +{ + struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap); + unsigned long nr_pages = buffer->pagecount; + struct page *cma_pages = buffer->priv_virt; + + /* free page list */ + kfree(buffer->pages); + /* release memory */ + cma_release(cma_heap->cma, cma_pages, nr_pages); + kfree(buffer); +} + +/* dmabuf heap CMA operations functions */ +static int cma_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); + struct heap_helper_buffer *helper_buffer; + struct page *cma_pages; + size_t size = PAGE_ALIGN(len); + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned long align = get_order(size); + struct dma_buf *dmabuf; + int ret = -ENOMEM; + pgoff_t pg; + + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; + + helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); + if (!helper_buffer) + return -ENOMEM; + + init_heap_helper_buffer(helper_buffer, cma_heap_free); + helper_buffer->heap = heap; + helper_buffer->size = len; + + cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); + if (!cma_pages) + goto free_buf; + + if (PageHighMem(cma_pages)) { + unsigned long nr_clear_pages = nr_pages; + struct page *page = cma_pages; + + while (nr_clear_pages > 0) { + void *vaddr = kmap_atomic(page); + + memset(vaddr, 0, PAGE_SIZE); + kunmap_atomic(vaddr); + /* + * Avoid wasting time zeroing memory if the process + * has been killed by SIGKILL + */ + if (fatal_signal_pending(current)) + goto free_cma; + + page++; + nr_clear_pages--; + } + } else { + memset(page_address(cma_pages), 0, size); + } + + helper_buffer->pagecount = nr_pages; + helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, + sizeof(*helper_buffer->pages), + GFP_KERNEL); + if (!helper_buffer->pages) { + ret = -ENOMEM; + goto free_cma; + } + + for (pg = 0; pg < helper_buffer->pagecount; pg++) + helper_buffer->pages[pg] = &cma_pages[pg]; + + /* create the dmabuf */ + dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + if (IS_ERR(dmabuf)) { + ret = PTR_ERR(dmabuf); + goto free_pages; + } + + helper_buffer->dmabuf = dmabuf; + helper_buffer->priv_virt = cma_pages; + + ret = dma_buf_fd(dmabuf, fd_flags); + if (ret < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + return ret; + } + + return ret; + +free_pages: + kfree(helper_buffer->pages); +free_cma: + cma_release(cma_heap->cma, cma_pages, nr_pages); +free_buf: + kfree(helper_buffer); + return ret; +} + +static const struct dma_heap_ops cma_heap_ops = { + .allocate = cma_heap_allocate, +}; + +static int __add_cma_heap(struct cma *cma, void *data) +{ + struct cma_heap *cma_heap; + struct dma_heap_export_info exp_info; + + cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); + if (!cma_heap) + return -ENOMEM; + cma_heap->cma = cma; + + exp_info.name = cma_get_name(cma); + exp_info.ops = &cma_heap_ops; + exp_info.priv = cma_heap; + + cma_heap->heap = dma_heap_add(&exp_info); + if (IS_ERR(cma_heap->heap)) { + int ret = PTR_ERR(cma_heap->heap); + + kfree(cma_heap); + return ret; + } + + return 0; +} + +static int add_default_cma_heap(void) +{ + struct cma *default_cma = dev_get_cma_area(NULL);
+ int ret = 0; + + if (default_cma) + ret = __add_cma_heap(default_cma, NULL); + + return ret; +} +module_init(add_default_cma_heap); +MODULE_DESCRIPTION("DMA-BUF CMA Heap"); +MODULE_LICENSE("GPL v2"); -- cgit
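Putting the series together, allocation from userspace looks roughly like this (an illustrative sketch; it assumes the uapi header added alongside the framework patch is installed as <linux/dma-heap.h>, and uses the /dev/dma_heap/system_heap node created by the heaps above):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/dma-heap.h>

	int alloc_from_system_heap(size_t len)
	{
		struct dma_heap_allocation_data data;
		int heap_fd, ret;

		heap_fd = open("/dev/dma_heap/system_heap", O_RDWR);
		if (heap_fd < 0)
			return -1;

		memset(&data, 0, sizeof(data));
		data.len = len;				/* rounded up to a page by the kernel */
		data.fd_flags = O_RDWR | O_CLOEXEC;	/* must stay within DMA_HEAP_VALID_FD_FLAGS */

		ret = ioctl(heap_fd, DMA_HEAP_IOC_ALLOC, &data);
		close(heap_fd);
		if (ret < 0)
			return -1;

		return data.fd;	/* a dma-buf fd, ready to mmap() or share */
	}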