path: root/drivers/dma-buf
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig              |   1
-rw-r--r--  drivers/dma-buf/dma-buf.c            |  29
-rw-r--r--  drivers/dma-buf/dma-fence.c          |  10
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c     |  10
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c  |   2
-rw-r--r--  drivers/dma-buf/sw_sync.c            |   6
-rw-r--r--  drivers/dma-buf/udmabuf.c            | 275
7 files changed, 197 insertions(+), 136 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index b46eb8a552d7..fee04fdb0822 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,6 +36,7 @@ config UDMABUF
depends on DMA_SHARED_BUFFER
depends on MEMFD_CREATE || COMPILE_TEST
depends on MMU
+ select VMAP_PFN
help
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
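The help text above is the whole userspace contract; a minimal sketch of that flow, assuming a page-aligned size and with error handling trimmed (create_udmabuf is an illustrative helper, not kernel code):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/udmabuf.h>

    int create_udmabuf(size_t size)  /* size must be PAGE_SIZE aligned */
    {
            int devfd = open("/dev/udmabuf", O_RDWR);
            int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
            struct udmabuf_create create = {
                    .memfd  = memfd,
                    .offset = 0,
                    .size   = size,
            };

            ftruncate(memfd, size);
            /* the driver requires F_SEAL_SHRINK and rejects F_SEAL_WRITE */
            fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

            return ioctl(devfd, UDMABUF_CREATE, &create);  /* dma-buf fd */
    }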
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 8892bc701a66..5ad0e9e2e1b9 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -176,8 +176,9 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
dmabuf = file->private_data;
/* only support discovering the end of the buffer,
- but also allow SEEK_SET to maintain the idiomatic
- SEEK_END(0), SEEK_CUR(0) pattern */
+ * but also allow SEEK_SET to maintain the idiomatic
+ * SEEK_END(0), SEEK_CUR(0) pattern.
+ */
if (whence == SEEK_END)
base = dmabuf->size;
else if (whence == SEEK_SET)
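For reference, the userspace idiom the comment preserves, as a sketch (dmabuf_fd is assumed to be an open dma-buf file descriptor):

    off_t size = lseek(dmabuf_fd, 0, SEEK_END); /* discover the buffer size */
    lseek(dmabuf_fd, 0, SEEK_SET);              /* rewind; other seeks fail */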
@@ -558,7 +559,7 @@ static struct file *dma_buf_getfile(size_t size, int flags)
* Override ->i_ino with the unique and dmabuffs specific
* value.
*/
- inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ inode->i_ino = atomic64_inc_return(&dmabuf_inode);
flags &= O_ACCMODE | O_NONBLOCK;
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
@@ -782,13 +783,14 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers mix
* up the bits, but take care to preserve the low SG_ bits to
* not corrupt the sgt. The mixing is undone in __unmap_dma_buf
- * before passing the sgt back to the exporter. */
+ * before passing the sgt back to the exporter.
+ */
for_each_sgtable_sg(sg_table, sg, i)
sg->page_link ^= ~0xffUL;
#endif
}
-static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
+static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
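The mangling above relies on XOR with a constant being self-inverse, while ~0xffUL leaves the low byte, where the SG_ chain/end-marker bits live, untouched. A standalone userspace sketch of that invariant (the page_link value is made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uintptr_t page_link = 0xffff888012345001UL; /* fake pointer | SG_ bits */
            uintptr_t mangled = page_link ^ ~0xffUL;    /* flip all but the low byte */

            assert((mangled & 0xffUL) == (page_link & 0xffUL)); /* SG_ bits intact */
            assert((mangled ^ ~0xffUL) == page_link);           /* undone on unmap */
            return 0;
    }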
@@ -1296,10 +1298,12 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* vmap interface is introduced. Note that on very old 32-bit architectures
* vmalloc space might be limited and result in vmap calls failing.
*
- * Interfaces::
+ * Interfaces:
*
- * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
- * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
+ * .. code-block:: c
+ *
+ * void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+ * void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
* it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
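Importer-side usage, as a minimal sketch with error handling trimmed (the _unlocked wrappers take the reservation lock internally; use dma_buf_vmap()/dma_buf_vunmap() when the caller already holds it):

    struct iosys_map map;
    int ret;

    ret = dma_buf_vmap_unlocked(dmabuf, &map);
    if (ret)
            return ret;
    /* map.vaddr (or map.vaddr_iomem for I/O memory) now addresses the buffer */
    iosys_map_memset(&map, 0, 0, PAGE_SIZE);    /* example CPU access */
    dma_buf_vunmap_unlocked(dmabuf, &map);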
@@ -1356,10 +1360,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* enough, since adding interfaces to intercept pagefaults and allow pte
* shootdowns would increase the complexity quite a bit.
*
- * Interface::
+ * Interface:
+ *
+ * .. code-block:: c
*
- * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
- * unsigned long);
+ * int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
* set up a mapping in userspace, calling do_mmap with &dma_buf.file will
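A hedged sketch of that forwarding, where my_obj and my_drv_mmap are hypothetical importer names:

    static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_obj *obj = file->private_data;

            /* hand the whole mapping off to the exporter, starting at page 0 */
            return dma_buf_mmap(obj->dmabuf, vma, 0);
    }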
@@ -1694,7 +1699,7 @@ static int dma_buf_init_debugfs(void)
dma_buf_debugfs_dir = d;
- d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+ d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
NULL, &dma_buf_debug_fops);
if (IS_ERR(d)) {
pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 0393a9bba3a8..f0cdd3e99d36 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -309,8 +309,8 @@ bool dma_fence_begin_signalling(void)
if (in_atomic())
return true;
- /* ... and non-recursive readlock */
- lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
+ /* ... and non-recursive successful read_trylock */
+ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_);
return false;
}
@@ -341,7 +341,7 @@ void __dma_fence_might_wait(void)
lock_map_acquire(&dma_fence_lockdep_map);
lock_map_release(&dma_fence_lockdep_map);
if (tmp)
- lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
+ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_);
}
#endif
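For reading the two hunks above, the signature being called (from include/linux/lockdep.h) is shown below; the patch flips the trylock argument from 0 to 1 so lockdep models the annotation as an already-successful read_trylock:

    void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                      int trylock,      /* 1: acquisition cannot block */
                      int read,         /* 1: shared/read acquisition */
                      int check,        /* 1: full lockdep validation */
                      struct lockdep_map *nest_lock, unsigned long ip);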
@@ -412,7 +412,7 @@ int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
unsigned long flags;
int ret;
- if (!fence)
+ if (WARN_ON(!fence))
return -EINVAL;
spin_lock_irqsave(fence->lock, flags);
@@ -464,7 +464,7 @@ int dma_fence_signal(struct dma_fence *fence)
int ret;
bool tmp;
- if (!fence)
+ if (WARN_ON(!fence))
return -EINVAL;
tmp = dma_fence_begin_signalling();
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 93be88b805fe..9512d050563a 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -309,13 +309,13 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
struct page *page = cma_pages;
while (nr_clear_pages > 0) {
- void *vaddr = kmap_atomic(page);
+ void *vaddr = kmap_local_page(page);
memset(vaddr, 0, PAGE_SIZE);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
/*
* Avoid wasting time zeroing memory if the process
- * has been killed by by SIGKILL
+ * has been killed by SIGKILL.
*/
if (fatal_signal_pending(current))
goto free_cma;
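kmap_local_page() mappings are per-CPU, valid only in the acquiring context, and must nest in stack order; a minimal sketch of that rule (page_a and page_b are placeholders):

    void *a = kmap_local_page(page_a);
    void *b = kmap_local_page(page_b);
    /* ... access a and b ... */
    kunmap_local(b);    /* unmap strictly in reverse order of mapping */
    kunmap_local(a);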
@@ -366,7 +366,7 @@ static const struct dma_heap_ops cma_heap_ops = {
.allocate = cma_heap_allocate,
};
-static int __add_cma_heap(struct cma *cma, void *data)
+static int __init __add_cma_heap(struct cma *cma, void *data)
{
struct cma_heap *cma_heap;
struct dma_heap_export_info exp_info;
@@ -391,7 +391,7 @@ static int __add_cma_heap(struct cma *cma, void *data)
return 0;
}
-static int add_default_cma_heap(void)
+static int __init add_default_cma_heap(void)
{
struct cma *default_cma = dev_get_cma_area(NULL);
int ret = 0;
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index d78cdb9d01e5..26d5dc89ea16 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -421,7 +421,7 @@ static const struct dma_heap_ops system_heap_ops = {
.allocate = system_heap_allocate,
};
-static int system_heap_create(void)
+static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index c353029789cf..f5905d67dedb 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -173,11 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
-static bool timeline_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
@@ -211,7 +206,6 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
- .enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 047c3cd2ceff..8ce1f074c2d3 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -27,15 +27,21 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
struct udmabuf {
pgoff_t pagecount;
struct folio **folios;
+
+ /**
+ * Unlike folios, pinned_folios is only used for unpinning.
+ * So nr_pinned is not the same as pagecount: pinned_folios only
+ * records each folio that was pinned during udmabuf_create.
+ * Note that, since a folio may be pinned multiple times, it can
+ * be added to pinned_folios multiple times, depending on how many
+ * times it was pinned at creation.
+ */
+ pgoff_t nr_pinned;
+ struct folio **pinned_folios;
+
struct sg_table *sg;
struct miscdevice *device;
pgoff_t *offsets;
- struct list_head unpin_list;
-};
-
-struct udmabuf_folio {
- struct folio *folio;
- struct list_head list;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -43,7 +49,8 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
pgoff_t pgoff = vmf->pgoff;
- unsigned long pfn;
+ unsigned long addr, pfn;
+ vm_fault_t ret;
if (pgoff >= ubuf->pagecount)
return VM_FAULT_SIGBUS;
@@ -51,7 +58,35 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
pfn = folio_pfn(ubuf->folios[pgoff]);
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
- return vmf_insert_pfn(vma, vmf->address, pfn);
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ if (ret & VM_FAULT_ERROR)
+ return ret;
+
+ /* pre fault */
+ pgoff = vma->vm_pgoff;
+ addr = vma->vm_start;
+
+ for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
+ if (addr == vmf->address)
+ continue;
+
+ if (WARN_ON(pgoff >= ubuf->pagecount))
+ break;
+
+ pfn = folio_pfn(ubuf->folios[pgoff]);
+ pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
+
+ /**
+ * If the below vmf_insert_pfn() fails, we do not return an
+ * error here during this pre-fault step. However, an error
+ * will be returned if the failure occurs when the address is
+ * actually accessed.
+ */
+ if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+ break;
+ }
+
+ return ret;
}
static const struct vm_operations_struct udmabuf_vm_ops = {
@@ -74,21 +109,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
- struct page **pages;
+ unsigned long *pfns;
void *vaddr;
pgoff_t pg;
dma_resv_assert_held(buf->resv);
- pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
- if (!pages)
+ /**
+ * HVO (HugeTLB Vmemmap Optimization) may free tail struct pages,
+ * so just use the pfn to map each folio into the vmalloc area.
+ */
+ pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
return -ENOMEM;
- for (pg = 0; pg < ubuf->pagecount; pg++)
- pages[pg] = &ubuf->folios[pg]->page;
+ for (pg = 0; pg < ubuf->pagecount; pg++) {
+ unsigned long pfn = folio_pfn(ubuf->folios[pg]);
+
+ pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
+ pfns[pg] = pfn;
+ }
- vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
- kfree(pages);
+ vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
+ kvfree(pfns);
if (!vaddr)
return -EINVAL;
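The switch from vm_map_ram() to vmap_pfn() is what the new "select VMAP_PFN" in Kconfig supports: vm_map_ram() needs struct page pointers, which HVO may have freed for tail pages, while vmap_pfn() needs only pfns. Its signature, from include/linux/vmalloc.h:

    void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);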
@@ -159,34 +202,42 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
return put_sg_table(at->dev, sg, direction);
}
-static void unpin_all_folios(struct list_head *unpin_list)
+static void unpin_all_folios(struct udmabuf *ubuf)
{
- struct udmabuf_folio *ubuf_folio;
+ pgoff_t i;
- while (!list_empty(unpin_list)) {
- ubuf_folio = list_first_entry(unpin_list,
- struct udmabuf_folio, list);
- unpin_folio(ubuf_folio->folio);
+ for (i = 0; i < ubuf->nr_pinned; ++i)
+ unpin_folio(ubuf->pinned_folios[i]);
- list_del(&ubuf_folio->list);
- kfree(ubuf_folio);
- }
+ kvfree(ubuf->pinned_folios);
}
-static int add_to_unpin_list(struct list_head *unpin_list,
- struct folio *folio)
+static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
- struct udmabuf_folio *ubuf_folio;
+ ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
+ if (!ubuf->folios)
+ return -ENOMEM;
- ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL);
- if (!ubuf_folio)
+ ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
+ if (!ubuf->offsets)
+ return -ENOMEM;
+
+ ubuf->pinned_folios = kvmalloc_array(pgcnt,
+ sizeof(*ubuf->pinned_folios),
+ GFP_KERNEL);
+ if (!ubuf->pinned_folios)
return -ENOMEM;
- ubuf_folio->folio = folio;
- list_add_tail(&ubuf_folio->list, unpin_list);
return 0;
}
+static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
+{
+ unpin_all_folios(ubuf);
+ kvfree(ubuf->offsets);
+ kvfree(ubuf->folios);
+}
+
static void release_udmabuf(struct dma_buf *buf)
{
struct udmabuf *ubuf = buf->priv;
@@ -195,9 +246,7 @@ static void release_udmabuf(struct dma_buf *buf)
if (ubuf->sg)
put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
- unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ deinit_udmabuf(ubuf);
kfree(ubuf);
}
@@ -254,9 +303,6 @@ static int check_memfd_seals(struct file *memfd)
{
int seals;
- if (!memfd)
- return -EBADFD;
-
if (!shmem_file(memfd) && !is_file_hugepages(memfd))
return -EBADFD;
@@ -291,100 +337,116 @@ static int export_udmabuf(struct udmabuf *ubuf,
return dma_buf_fd(buf, flags);
}
+static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
+ loff_t start, loff_t size, struct folio **folios)
+{
+ pgoff_t nr_pinned = ubuf->nr_pinned;
+ pgoff_t upgcnt = ubuf->pagecount;
+ u32 cur_folio, cur_pgcnt;
+ pgoff_t pgoff, pgcnt;
+ long nr_folios;
+ loff_t end;
+
+ pgcnt = size >> PAGE_SHIFT;
+ end = start + (pgcnt << PAGE_SHIFT) - 1;
+ nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
+ if (nr_folios <= 0)
+ return nr_folios ? nr_folios : -EINVAL;
+
+ cur_pgcnt = 0;
+ for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
+ pgoff_t subpgoff = pgoff;
+ size_t fsize = folio_size(folios[cur_folio]);
+
+ ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
+
+ for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
+ ubuf->folios[upgcnt] = folios[cur_folio];
+ ubuf->offsets[upgcnt] = subpgoff;
+ ++upgcnt;
+
+ if (++cur_pgcnt >= pgcnt)
+ goto end;
+ }
+
+ /**
+ * In a given range, only the first subpage of the first folio
+ * has an offset, which is returned by memfd_pin_folios().
+ * The first subpages of the other folios in the range have an
+ * offset of 0.
+ */
+ pgoff = 0;
+ }
+end:
+ ubuf->pagecount = upgcnt;
+ ubuf->nr_pinned = nr_pinned;
+ return 0;
+}
+
static long udmabuf_create(struct miscdevice *device,
struct udmabuf_create_list *head,
struct udmabuf_create_item *list)
{
- pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0;
- long nr_folios, ret = -EINVAL;
- struct file *memfd = NULL;
- struct folio **folios;
+ unsigned long max_nr_folios = 0;
+ struct folio **folios = NULL;
+ pgoff_t pgcnt = 0, pglimit;
struct udmabuf *ubuf;
- u32 i, j, k, flags;
- loff_t end;
+ long ret = -EINVAL;
+ u32 i, flags;
ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
if (!ubuf)
return -ENOMEM;
- INIT_LIST_HEAD(&ubuf->unpin_list);
pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
- if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
- goto err;
- if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
- goto err;
- ubuf->pagecount += list[i].size >> PAGE_SHIFT;
- if (ubuf->pagecount > pglimit)
- goto err;
+ pgoff_t subpgcnt;
+
+ if (!PAGE_ALIGNED(list[i].offset))
+ goto err_noinit;
+ if (!PAGE_ALIGNED(list[i].size))
+ goto err_noinit;
+
+ subpgcnt = list[i].size >> PAGE_SHIFT;
+ pgcnt += subpgcnt;
+ if (pgcnt > pglimit)
+ goto err_noinit;
+
+ max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
}
- if (!ubuf->pagecount)
- goto err;
+ if (!pgcnt)
+ goto err_noinit;
- ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
- GFP_KERNEL);
- if (!ubuf->folios) {
- ret = -ENOMEM;
+ ret = init_udmabuf(ubuf, pgcnt);
+ if (ret)
goto err;
- }
- ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
- GFP_KERNEL);
- if (!ubuf->offsets) {
+
+ folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
+ if (!folios) {
ret = -ENOMEM;
goto err;
}
- pgbuf = 0;
for (i = 0; i < head->count; i++) {
- memfd = fget(list[i].memfd);
- ret = check_memfd_seals(memfd);
- if (ret < 0)
- goto err;
+ struct file *memfd = fget(list[i].memfd);
- pgcnt = list[i].size >> PAGE_SHIFT;
- folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
- if (!folios) {
- ret = -ENOMEM;
+ if (!memfd) {
+ ret = -EBADFD;
goto err;
}
- end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1;
- ret = memfd_pin_folios(memfd, list[i].offset, end,
- folios, pgcnt, &pgoff);
- if (ret <= 0) {
- kfree(folios);
- if (!ret)
- ret = -EINVAL;
+ ret = check_memfd_seals(memfd);
+ if (ret < 0) {
+ fput(memfd);
goto err;
}
- nr_folios = ret;
- pgoff >>= PAGE_SHIFT;
- for (j = 0, k = 0; j < pgcnt; j++) {
- ubuf->folios[pgbuf] = folios[k];
- ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT;
-
- if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) {
- ret = add_to_unpin_list(&ubuf->unpin_list,
- folios[k]);
- if (ret < 0) {
- kfree(folios);
- goto err;
- }
- }
-
- pgbuf++;
- if (++pgoff == folio_nr_pages(folios[k])) {
- pgoff = 0;
- if (++k == nr_folios)
- break;
- }
- }
-
- kfree(folios);
+ ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
+ list[i].size, folios);
fput(memfd);
- memfd = NULL;
+ if (ret)
+ goto err;
}
flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
@@ -392,15 +454,14 @@ static long udmabuf_create(struct miscdevice *device,
if (ret < 0)
goto err;
+ kvfree(folios);
return ret;
err:
- if (memfd)
- fput(memfd);
- unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ deinit_udmabuf(ubuf);
+err_noinit:
kfree(ubuf);
+ kvfree(folios);
return ret;
}
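To exercise the rewritten create path end to end, a hedged userspace sketch building one dma-buf from two memfd ranges via UDMABUF_CREATE_LIST (memfd_a and memfd_b are assumed to already satisfy check_memfd_seals(); error handling trimmed):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/udmabuf.h>

    int create_from_two_ranges(int devfd, int memfd_a, int memfd_b, __u64 size)
    {
            struct udmabuf_create_list *list;
            int buf_fd;

            list = calloc(1, sizeof(*list) +
                             2 * sizeof(struct udmabuf_create_item));
            list->flags = UDMABUF_FLAGS_CLOEXEC;
            list->count = 2;
            list->list[0] = (struct udmabuf_create_item)
                            { .memfd = memfd_a, .size = size };
            list->list[1] = (struct udmabuf_create_item)
                            { .memfd = memfd_b, .size = size };

            buf_fd = ioctl(devfd, UDMABUF_CREATE_LIST, list); /* dma-buf fd */
            free(list);
            return buf_fd;
    }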