Diffstat (limited to 'drivers/tee/tee_shm.c')
-rw-r--r--  drivers/tee/tee_shm.c  335
1 file changed, 276 insertions(+), 59 deletions(-)
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 27295bda3e0b..4a47de4bb2e5 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -4,15 +4,25 @@
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/idr.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
+struct tee_shm_dma_mem {
+ struct tee_shm shm;
+ dma_addr_t dma_addr;
+ struct page *page;
+};
+
static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
size_t n;
@@ -21,41 +31,12 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
put_page(pages[n]);
}
-static int shm_get_kernel_pages(unsigned long start, size_t page_count,
- struct page **pages)
+static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
size_t n;
- int rc;
-
- if (is_vmalloc_addr((void *)start)) {
- struct page *page;
-
- for (n = 0; n < page_count; n++) {
- page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
- if (!page)
- return -ENOMEM;
-
- get_page(page);
- pages[n] = page;
- }
- rc = page_count;
- } else {
- struct kvec *kiov;
-
- kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
- if (!kiov)
- return -ENOMEM;
- for (n = 0; n < page_count; n++) {
- kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
- kiov[n].iov_len = PAGE_SIZE;
- }
-
- rc = get_kernel_pages(kiov, page_count, 0, pages);
- kfree(kiov);
- }
-
- return rc;
+ for (n = 0; n < page_count; n++)
+ get_page(pages[n]);
}
static void release_registered_pages(struct tee_shm *shm)
@@ -72,7 +53,24 @@ static void release_registered_pages(struct tee_shm *shm)
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
- if (shm->flags & TEE_SHM_POOL) {
+ void *p = shm;
+
+ if (shm->flags & TEE_SHM_DMA_MEM) {
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+ struct tee_shm_dma_mem *dma_mem;
+
+ dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
+ p = dma_mem;
+ dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
+ dma_mem->dma_addr, DMA_BIDIRECTIONAL);
+#endif
+ } else if (shm->flags & TEE_SHM_DMA_BUF) {
+ struct tee_shm_dmabuf_ref *ref;
+
+ ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+ p = ref;
+ dma_buf_put(ref->dmabuf);
+ } else if (shm->flags & TEE_SHM_POOL) {
teedev->pool->ops->free(teedev->pool, shm);
} else if (shm->flags & TEE_SHM_DYNAMIC) {
int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
@@ -86,7 +84,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
teedev_ctx_put(shm->ctx);
- kfree(shm);
+ kfree(p);
tee_device_put(teedev);
}
@@ -196,7 +194,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
* tee_client_invoke_func(). The memory allocated is later freed with a
* call to tee_shm_free().
*
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, or an ERR_PTR on failure
*/
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
@@ -206,6 +204,62 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
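
A minimal caller sketch of the allocate/use/free cycle described in the
kernel-doc above; 'ctx' is assumed to be a struct tee_context obtained
earlier, e.g. via tee_client_open_context():

	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc_kernel_buf(ctx, PAGE_SIZE);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);	/* kernel mapping of the buffer */
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return PTR_ERR(va);
	}
	/* ... fill va, pass shm as a memref param to tee_client_invoke_func() ... */
	tee_shm_free(shm);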
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
+{
+ struct tee_shm_dmabuf_ref *ref;
+ int rc;
+
+ if (!tee_device_get(ctx->teedev))
+ return ERR_PTR(-EINVAL);
+
+ teedev_ctx_get(ctx);
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ rc = -ENOMEM;
+ goto err_put_tee;
+ }
+
+ refcount_set(&ref->shm.refcount, 1);
+ ref->shm.ctx = ctx;
+ ref->shm.id = -1;
+ ref->shm.flags = TEE_SHM_DMA_BUF;
+
+ ref->dmabuf = dma_buf_get(fd);
+ if (IS_ERR(ref->dmabuf)) {
+ rc = PTR_ERR(ref->dmabuf);
+ goto err_kfree_ref;
+ }
+
+ rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
+ &ref->offset, &ref->shm,
+ &ref->parent_shm);
+ if (rc)
+ goto err_put_dmabuf;
+
+ mutex_lock(&ref->shm.ctx->teedev->mutex);
+ ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
+ 1, 0, GFP_KERNEL);
+ mutex_unlock(&ref->shm.ctx->teedev->mutex);
+ if (ref->shm.id < 0) {
+ rc = ref->shm.id;
+ goto err_put_dmabuf;
+ }
+
+ return &ref->shm;
+
+err_put_dmabuf:
+ dma_buf_put(ref->dmabuf);
+err_kfree_ref:
+ kfree(ref);
+err_put_tee:
+ teedev_ctx_put(ctx);
+ tee_device_put(ctx->teedev);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_fd);
+
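For reference, a hedged sketch of how a client driver could wrap a dma-buf
fd received from user space; 'ctx' and 'fd' are assumed to come from the
caller, and error handling follows the ERR_PTR convention used above:

	struct tee_shm *shm;

	shm = tee_shm_register_fd(ctx, fd);	/* takes a reference on the dma-buf */
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... pass shm as a memref parameter to the TEE ... */

	tee_shm_put(shm);	/* last put releases the dma-buf in tee_shm_release() */
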
/**
* tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
* kernel buffer
@@ -230,14 +284,146 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+/**
+ * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object
+ * @ctx: Context that allocates the shared memory
+ * @page_count: Number of pages
+ *
+ * The allocated memory is expected to be lent to the TEE (made
+ * inaccessible to the kernel) while it's in use, and returned (made
+ * accessible to the kernel again) before it's freed.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm' on success, or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm_dma_mem *dma_mem;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
+ &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!page)
+ goto err_put_teedev;
+
+ dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
+ if (!dma_mem)
+ goto err_free_pages;
+
+ refcount_set(&dma_mem->shm.refcount, 1);
+ dma_mem->shm.ctx = ctx;
+ dma_mem->shm.id = -1;
+ dma_mem->shm.paddr = page_to_phys(page);
+ dma_mem->dma_addr = dma_addr;
+ dma_mem->page = page;
+ dma_mem->shm.size = page_count * PAGE_SIZE;
+ dma_mem->shm.flags = TEE_SHM_DMA_MEM;
+
+ teedev_ctx_get(ctx);
+
+ return &dma_mem->shm;
+
+err_free_pages:
+ dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
+ DMA_BIDIRECTIONAL);
+err_put_teedev:
+ tee_device_put(teedev);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#else
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#endif
+
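The lend/return cycle the kernel-doc above describes, as a sketch;
lend_to_tee() and reclaim_from_tee() are hypothetical stand-ins for the
driver-specific operations that change the memory's accessibility:

	struct tee_shm *shm;
	int rc;

	shm = tee_shm_alloc_dma_mem(ctx, nr_pages);
	if (IS_ERR(shm))	/* -EINVAL when CONFIG_TEE_DMABUF_HEAPS=n */
		return PTR_ERR(shm);

	rc = lend_to_tee(shm->paddr, shm->size);	/* hypothetical */
	if (!rc) {
		/* ... memory is now inaccessible to the kernel ... */
		reclaim_from_tee(shm->paddr, shm->size);	/* hypothetical */
	}
	tee_shm_put(shm);	/* last put frees the pages in tee_shm_release() */
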
+int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start))
+{
+ size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ struct page **pages;
+ unsigned int i;
+ int rc = 0;
+
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
+ shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!shm->kaddr)
+ return -ENOMEM;
+
+ shm->paddr = virt_to_phys(shm->kaddr);
+ shm->size = nr_pages * PAGE_SIZE;
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err_pages;
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
+
+ shm->pages = pages;
+ shm->num_pages = nr_pages;
+
+ if (shm_register) {
+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ (unsigned long)shm->kaddr);
+ if (rc)
+ goto err_kfree;
+ }
+
+ return 0;
+err_kfree:
+ kfree(pages);
+err_pages:
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);
+
+void tee_dyn_shm_free_helper(struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm))
+{
+ if (shm_unregister)
+ shm_unregister(shm->ctx, shm);
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
+ kfree(shm->pages);
+ shm->pages = NULL;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
+
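These helpers are meant to back a driver's dynamic shm pool ops; a sketch
of the wiring, where my_shm_register() and my_shm_unregister() stand in
for the backend's real register/unregister callbacks:

	static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
				 size_t size, size_t align)
	{
		return tee_dyn_shm_alloc_helper(shm, size, align, my_shm_register);
	}

	static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
	{
		tee_dyn_shm_free_helper(shm, my_shm_unregister);
	}
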
static struct tee_shm *
-register_shm_helper(struct tee_context *ctx, unsigned long addr,
- size_t length, u32 flags, int id)
+register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
+ int id)
{
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
- unsigned long start;
- size_t num_pages;
+ unsigned long start, addr;
+ size_t num_pages, off;
+ ssize_t len;
void *ret;
int rc;
@@ -262,31 +448,46 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
shm->flags = flags;
shm->ctx = ctx;
shm->id = id;
- addr = untagged_addr(addr);
+ addr = untagged_addr((unsigned long)iter_iov_addr(iter));
start = rounddown(addr, PAGE_SIZE);
- shm->offset = addr - start;
- shm->size = length;
- num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+ num_pages = iov_iter_npages(iter, INT_MAX);
+ if (!num_pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_ctx_put;
+ }
+
shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
if (!shm->pages) {
ret = ERR_PTR(-ENOMEM);
goto err_free_shm;
}
- if (flags & TEE_SHM_USER_MAPPED)
- rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
- shm->pages);
- else
- rc = shm_get_kernel_pages(start, num_pages, shm->pages);
- if (rc > 0)
- shm->num_pages = rc;
- if (rc != num_pages) {
- if (rc >= 0)
- rc = -ENOMEM;
- ret = ERR_PTR(rc);
+ len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
+ &off);
+ if (unlikely(len <= 0)) {
+ ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
+ goto err_free_shm_pages;
+ } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
+ /*
+ * If we got fewer pages than requested, update shm->num_pages
+ * so the error path below releases the correct number of pages.
+ */
+ shm->num_pages = len / PAGE_SIZE;
+ ret = ERR_PTR(-ENOMEM);
goto err_put_shm_pages;
}
+ /*
+ * iov_iter_extract_kvec_pages() does not take a reference on the
+ * pages, so take one here.
+ */
+ if (iov_iter_is_kvec(iter))
+ shm_get_kernel_pages(shm->pages, num_pages);
+
+ shm->offset = off;
+ shm->size = len;
+ shm->num_pages = num_pages;
+
rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
shm->num_pages, start);
if (rc) {
@@ -296,10 +497,11 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
return shm;
err_put_shm_pages:
- if (flags & TEE_SHM_USER_MAPPED)
+ if (!iov_iter_is_kvec(iter))
unpin_user_pages(shm->pages, shm->num_pages);
else
shm_put_kernel_pages(shm->pages, shm->num_pages);
+err_free_shm_pages:
kfree(shm->pages);
err_free_shm:
kfree(shm);
@@ -324,6 +526,7 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
+ struct iov_iter iter;
void *ret;
int id;
@@ -336,7 +539,8 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
if (id < 0)
return ERR_PTR(id);
- shm = register_shm_helper(ctx, addr, length, flags, id);
+ iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
+ shm = register_shm_helper(ctx, &iter, flags, id);
if (IS_ERR(shm)) {
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, id);
@@ -369,8 +573,14 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length)
{
u32 flags = TEE_SHM_DYNAMIC;
+ struct kvec kvec;
+ struct iov_iter iter;
+
+ kvec.iov_base = addr;
+ kvec.iov_len = length;
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);
- return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
+ return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
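
With the iov_iter rework above, kernel buffer registration reduces to a
one-segment ITER_KVEC. A minimal caller sketch, with 'ctx' assumed to
come from the caller:

	void *buf = kmalloc(4096, GFP_KERNEL);
	struct tee_shm *shm;

	if (!buf)
		return -ENOMEM;
	shm = tee_shm_register_kernel_buf(ctx, buf, 4096);
	if (IS_ERR(shm)) {
		kfree(buf);
		return PTR_ERR(shm);
	}
	/* ... the TEE can now access buf via the registered shm ... */
	tee_shm_free(shm);
	kfree(buf);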
@@ -388,6 +598,9 @@ static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
/* Refuse sharing shared memory provided by application */
if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL;
+ /* Refuse to share registered dma-bufs with the application */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ return -EINVAL;
/* check for overflowing the buffer's size */
if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
@@ -506,9 +719,13 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
*/
void tee_shm_put(struct tee_shm *shm)
{
- struct tee_device *teedev = shm->ctx->teedev;
+ struct tee_device *teedev;
bool do_release = false;
+ if (!shm || !shm->ctx || !shm->ctx->teedev)
+ return;
+
+ teedev = shm->ctx->teedev;
mutex_lock(&teedev->mutex);
if (refcount_dec_and_test(&shm->refcount)) {
/*