Diffstat (limited to 'drivers/gpu/drm/exynos/exynos_drm_gem.c')
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 844 +++++++++++-------------------------
 1 file changed, 255 insertions(+), 589 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 24c22a8c3364..b9b2f000072d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -1,108 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* exynos_drm_gem.c
  *
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
  */
 
-#include <drm/drmP.h>
+#include <linux/dma-buf.h>
 #include <linux/shmem_fs.h>
+#include <linux/module.h>
+
+#include <drm/drm_dumb_buffers.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vma_manager.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 
-static unsigned int convert_to_vm_err_msg(int msg)
-{
-	unsigned int out_msg;
-
-	switch (msg) {
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-		out_msg = VM_FAULT_NOPAGE;
-		break;
-
-	case -ENOMEM:
-		out_msg = VM_FAULT_OOM;
-		break;
-
-	default:
-		out_msg = VM_FAULT_SIGBUS;
-		break;
-	}
+MODULE_IMPORT_NS("DMA_BUF");
 
-	return out_msg;
-}
+static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
-static int check_gem_flags(unsigned int flags)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
 {
-	if (flags & ~(EXYNOS_BO_MASK)) {
-		DRM_ERROR("invalid flags.\n");
-		return -EINVAL;
-	}
+	struct drm_device *dev = exynos_gem->base.dev;
+	unsigned long attr = 0;
 
-	return 0;
-}
+	if (exynos_gem->dma_addr) {
+		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
+		return 0;
+	}
 
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
-					struct vm_area_struct *vma)
-{
-	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+	/*
+	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
+	 * region will be allocated else physically contiguous
+	 * as possible.
+	 */
+	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
+		attr |= DMA_ATTR_FORCE_CONTIGUOUS;
 
-	/* non-cachable as default. */
-	if (obj->flags & EXYNOS_BO_CACHABLE)
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	else if (obj->flags & EXYNOS_BO_WC)
-		vma->vm_page_prot =
-			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	else
-		vma->vm_page_prot =
-			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
+	/*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
+	 * else cachable mapping.
+	 */
+	if (exynos_gem->flags & EXYNOS_BO_WC ||
+			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
+		attr |= DMA_ATTR_WRITE_COMBINE;
+
+	/* FBDev emulation requires kernel mapping */
+	if (!kvmap)
+		attr |= DMA_ATTR_NO_KERNEL_MAPPING;
+
+	exynos_gem->dma_attrs = attr;
+	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
+					     &exynos_gem->dma_addr, GFP_KERNEL,
+					     exynos_gem->dma_attrs);
+	if (!exynos_gem->cookie) {
+		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
+		return -ENOMEM;
+	}
 
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
-	/* TODO */
+	if (kvmap)
+		exynos_gem->kvaddr = exynos_gem->cookie;
 
-	return roundup(size, PAGE_SIZE);
+	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
+	return 0;
 }
 
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
-					struct vm_area_struct *vma,
-					unsigned long f_vaddr,
-					pgoff_t page_offset)
+static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	struct scatterlist *sgl;
-	unsigned long pfn;
-	int i;
-
-	if (!buf->sgt)
-		return -EINTR;
-
-	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
-		DRM_ERROR("invalid page offset\n");
-		return -EINVAL;
-	}
+	struct drm_device *dev = exynos_gem->base.dev;
 
-	sgl = buf->sgt->sgl;
-	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-		if (page_offset < (sgl->length >> PAGE_SHIFT))
-			break;
-		page_offset -= (sgl->length >> PAGE_SHIFT);
+	if (!exynos_gem->dma_addr) {
+		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
+		return;
 	}
 
-	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
 
-	return vm_insert_mixed(vma, f_vaddr, pfn);
+	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
+			(dma_addr_t)exynos_gem->dma_addr,
+			exynos_gem->dma_attrs);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -119,23 +101,20 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 	if (ret)
 		return ret;
 
-	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);
 
 	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put(obj);
 
 	return 0;
 }
 
-void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
+void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
 {
-	struct drm_gem_object *obj;
-	struct exynos_drm_gem_buf *buf;
+	struct drm_gem_object *obj = &exynos_gem->base;
 
-	obj = &exynos_gem_obj->base;
-	buf = exynos_gem_obj->buffer;
-
-	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
+			  obj->handle_count);
 
 	/*
 	 * do not release memory region from exporter.
@@ -144,651 +123,338 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	 * once dmabuf's refcount becomes 0.
 	 */
 	if (obj->import_attach)
-		goto out;
-
-	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
-
-out:
-	exynos_drm_fini_buf(obj->dev, buf);
-	exynos_gem_obj->buffer = NULL;
-
-	if (obj->map_list.map)
-		drm_gem_free_mmap_offset(obj);
+		drm_prime_gem_destroy(obj, exynos_gem->sgt);
+	else
+		exynos_drm_free_buf(exynos_gem);
 
 	/* release file pointer to gem object. */
 	drm_gem_object_release(obj);
 
-	kfree(exynos_gem_obj);
-	exynos_gem_obj = NULL;
+	kfree(exynos_gem);
 }
 
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
-						unsigned int gem_handle,
-						struct drm_file *file_priv)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct drm_gem_object *obj;
-
-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		return 0;
-	}
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	drm_gem_object_unreference_unlocked(obj);
-
-	return exynos_gem_obj->buffer->size;
-}
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
 
+static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
+	.free = exynos_drm_gem_free_object,
+	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
+	.mmap = exynos_drm_gem_mmap,
+	.vm_ops = &exynos_drm_gem_vm_ops,
+};
 
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
-						      unsigned long size)
+static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
+						  unsigned long size)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem *exynos_gem;
 	struct drm_gem_object *obj;
 	int ret;
 
-	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
-	if (!exynos_gem_obj) {
-		DRM_ERROR("failed to allocate exynos gem object\n");
-		return NULL;
-	}
+	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
+	if (!exynos_gem)
+		return ERR_PTR(-ENOMEM);
 
-	exynos_gem_obj->size = size;
-	obj = &exynos_gem_obj->base;
+	exynos_gem->size = size;
+	obj = &exynos_gem->base;
+
+	obj->funcs = &exynos_drm_gem_object_funcs;
 
 	ret = drm_gem_object_init(dev, obj, size);
 	if (ret < 0) {
-		DRM_ERROR("failed to initialize gem object\n");
-		kfree(exynos_gem_obj);
-		return NULL;
+		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
+		kfree(exynos_gem);
+		return ERR_PTR(ret);
+	}
+
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret < 0) {
+		drm_gem_object_release(obj);
+		kfree(exynos_gem);
+		return ERR_PTR(ret);
 	}
 
-	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
+	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %p\n", obj->filp);
 
-	return exynos_gem_obj;
+	return exynos_gem;
 }
 
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-						unsigned int flags,
-						unsigned long size)
+struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
+					     unsigned int flags,
+					     unsigned long size,
+					     bool kvmap)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
+	struct exynos_drm_gem *exynos_gem;
 	int ret;
 
-	if (!size) {
-		DRM_ERROR("invalid size.\n");
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_DEV_ERROR(dev->dev,
+			      "invalid GEM buffer flags: %u\n", flags);
 		return ERR_PTR(-EINVAL);
 	}
 
-	size = roundup_gem_size(size, flags);
+	if (!size) {
+		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
+		return ERR_PTR(-EINVAL);
+	}
 
-	ret = check_gem_flags(flags);
-	if (ret)
-		return ERR_PTR(ret);
+	size = roundup(size, PAGE_SIZE);
 
-	buf = exynos_drm_init_buf(dev, size);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
+	exynos_gem = exynos_drm_gem_init(dev, size);
+	if (IS_ERR(exynos_gem))
+		return exynos_gem;
 
-	exynos_gem_obj = exynos_drm_gem_init(dev, size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_fini_buf;
+	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
+		/*
+		 * when no IOMMU is available, all allocated buffers are
+		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
+		 */
+		flags &= ~EXYNOS_BO_NONCONTIG;
+		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
 	}
 
-	exynos_gem_obj->buffer = buf;
-
 	/* set memory type and cache attribute from user side. */
-	exynos_gem_obj->flags = flags;
+	exynos_gem->flags = flags;
 
-	ret = exynos_drm_alloc_buf(dev, buf, flags);
-	if (ret < 0)
-		goto err_gem_fini;
-
-	return exynos_gem_obj;
+	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem->base);
+		kfree(exynos_gem);
+		return ERR_PTR(ret);
+	}
 
-err_gem_fini:
-	drm_gem_object_release(&exynos_gem_obj->base);
-	kfree(exynos_gem_obj);
-err_fini_buf:
-	exynos_drm_fini_buf(dev, buf);
-	return ERR_PTR(ret);
+	return exynos_gem;
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
 	struct drm_exynos_gem_create *args = data;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem *exynos_gem;
 	int ret;
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
-	if (IS_ERR(exynos_gem_obj))
-		return PTR_ERR(exynos_gem_obj);
+	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
+	if (IS_ERR(exynos_gem))
+		return PTR_ERR(exynos_gem);
 
-	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
-			&args->handle);
+	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
+					   &args->handle);
 	if (ret) {
-		exynos_drm_gem_destroy(exynos_gem_obj);
+		exynos_drm_gem_destroy(exynos_gem);
 		return ret;
 	}
 
 	return 0;
 }
 
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
-					unsigned int gem_handle,
-					struct drm_file *filp)
+int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct drm_gem_object *obj;
-
-	obj = drm_gem_object_lookup(dev, filp, gem_handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct drm_exynos_gem_map *args = data;
 
-	return &exynos_gem_obj->buffer->dma_addr;
+	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+				       &args->offset);
 }
 
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
-					unsigned int gem_handle,
-					struct drm_file *filp)
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+					  unsigned int gem_handle)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, filp, gem_handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		return;
-	}
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	drm_gem_object_unreference_unlocked(obj);
-
-	/*
-	 * decrease obj->refcount one more time because we has already
-	 * increased it at exynos_drm_gem_get_dma_addr().
-	 */
-	drm_gem_object_unreference_unlocked(obj);
-}
-
-int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv)
-{
-	struct drm_exynos_gem_map_off *args = data;
-
-	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
-			args->handle, (unsigned long)args->offset);
-
-	if (!(dev->driver->driver_features & DRIVER_GEM)) {
-		DRM_ERROR("does not support GEM.\n");
-		return -ENODEV;
-	}
-
-	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
-			&args->offset);
-}
-
-static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
-						 struct file *filp)
-{
-	struct drm_file *file_priv;
-
-	/* find current process's drm_file from filelist. */
-	list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
-		if (file_priv->filp == filp)
-			return file_priv;
-
-	WARN_ON(1);
-
-	return ERR_PTR(-EFAULT);
+	obj = drm_gem_object_lookup(filp, gem_handle);
+	if (!obj)
+		return NULL;
+	return to_exynos_gem(obj);
 }
 
-static int exynos_drm_gem_mmap_buffer(struct file *filp,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
 				      struct vm_area_struct *vma)
 {
-	struct drm_gem_object *obj = filp->private_data;
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct drm_device *drm_dev = obj->dev;
-	struct exynos_drm_gem_buf *buffer;
-	struct drm_file *file_priv;
+	struct drm_device *drm_dev = exynos_gem->base.dev;
 	unsigned long vm_size;
 	int ret;
 
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_private_data = obj;
-	vma->vm_ops = drm_dev->driver->gem_vm_ops;
-
-	/* restore it to driver's fops. */
-	filp->f_op = fops_get(drm_dev->driver->fops);
-
-	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
-	if (IS_ERR(file_priv))
-		return PTR_ERR(file_priv);
-
-	/* restore it to drm_file. */
-	filp->private_data = file_priv;
-
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	vm_flags_clear(vma, VM_PFNMAP);
+	vma->vm_pgoff = 0;
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * a buffer contains information to physically continuous memory
-	 * allocated by user request or at framebuffer creation.
-	 */
-	buffer = exynos_gem_obj->buffer;
-
 	/* check if user-requested size is valid. */
-	if (vm_size > buffer->size)
+	if (vm_size > exynos_gem->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
-				buffer->dma_addr, buffer->size,
-				&buffer->dma_attrs);
+	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
+			     exynos_gem->dma_addr, exynos_gem->size,
+			     exynos_gem->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
 	}
 
-	/*
-	 * take a reference to this mapping of the object. And this reference
-	 * is unreferenced by the corresponding vm_close call.
-	 */
-	drm_gem_object_reference(obj);
-
-	drm_vm_open_locked(drm_dev, vma);
-
-	return 0;
-}
-
-static const struct file_operations exynos_drm_gem_fops = {
-	.mmap = exynos_drm_gem_mmap_buffer,
-};
-
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_exynos_gem_mmap *args = data;
-	struct drm_gem_object *obj;
-	unsigned long addr;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM)) {
-		DRM_ERROR("does not support GEM.\n");
-		return -ENODEV;
-	}
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * We have to use gem object and its fops for specific mmaper,
-	 * but vm_mmap() can deliver only filp. So we have to change
-	 * filp->f_op and filp->private_data temporarily, then restore
-	 * again. So it is important to keep lock until restoration the
-	 * settings to prevent others from misuse of filp->f_op or
-	 * filp->private_data.
-	 */
-	mutex_lock(&dev->struct_mutex);
-
-	/*
-	 * Set specific mmper's fops. And it will be restored by
-	 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
-	 * This is used to call specific mapper temporarily.
-	 */
-	file_priv->filp->f_op = &exynos_drm_gem_fops;
-
-	/*
-	 * Set gem object to private_data so that specific mmaper
-	 * can get the gem object. And it will be restored by
-	 * exynos_drm_gem_mmap_buffer to drm_file.
-	 */
-	file_priv->filp->private_data = obj;
-
-	addr = vm_mmap(file_priv->filp, 0, args->size,
-			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
-
-	drm_gem_object_unreference(obj);
-
-	if (IS_ERR_VALUE(addr)) {
-		/* check filp->f_op, filp->private_data are restored */
-		if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
-			file_priv->filp->f_op = fops_get(dev->driver->fops);
-			file_priv->filp->private_data = file_priv;
-		}
-		mutex_unlock(&dev->struct_mutex);
-		return (int)addr;
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	args->mapped = addr;
-
-	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
-
 	return 0;
 }
 
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv)
-{	struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+	struct exynos_drm_gem *exynos_gem;
 	struct drm_exynos_gem_info *args = data;
 	struct drm_gem_object *obj;
 
-	mutex_lock(&dev->struct_mutex);
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		mutex_unlock(&dev->struct_mutex);
+		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
 		return -EINVAL;
 	}
 
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	args->flags = exynos_gem_obj->flags;
-	args->size = exynos_gem_obj->size;
-
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
-	struct vm_area_struct *vma_copy;
-
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (!vma_copy)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-						unsigned int npages,
-						struct page **pages,
-						struct vm_area_struct *vma)
-{
-	int get_npages;
-
-	/* the memory region mmaped with VM_PFNMAP. */
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-			if (ret)
-				return ret;
-
-			pages[i] = pfn_to_page(pfn);
-		}
-
-		if (i != npages) {
-			DRM_ERROR("failed to get user_pages.\n");
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-	get_npages = get_user_pages(current, current->mm, start,
-			npages, 1, 1, pages, NULL);
-	get_npages = max(get_npages, 0);
-	if (get_npages != npages) {
-		DRM_ERROR("failed to get user_pages.\n");
-		while (get_npages)
-			put_page(pages[--get_npages]);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-					unsigned int npages,
-					struct vm_area_struct *vma)
-{
-	if (!vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; i++) {
-			set_page_dirty_lock(pages[i]);
-
-			/*
-			 * undo the reference we took when populating
-			 * the table.
-			 */
-			put_page(pages[i]);
-		}
-	}
-}
-
-int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
-				struct sg_table *sgt,
-				enum dma_data_direction dir)
-{
-	int nents;
-
-	mutex_lock(&drm_dev->struct_mutex);
-
-	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
-	if (!nents) {
-		DRM_ERROR("failed to map sgl with dma.\n");
-		mutex_unlock(&drm_dev->struct_mutex);
-		return nents;
-	}
+	exynos_gem = to_exynos_gem(obj);
 
-	mutex_unlock(&drm_dev->struct_mutex);
-	return 0;
-}
+	args->flags = exynos_gem->flags;
+	args->size = exynos_gem->size;
 
-void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
-				struct sg_table *sgt,
-				enum dma_data_direction dir)
-{
-	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
-}
+	drm_gem_object_put(obj);
 
-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
-{
 	return 0;
 }
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-	buf = exynos_gem_obj->buffer;
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, buf->sgt);
-
-	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
+	exynos_drm_gem_destroy(to_exynos_gem(obj));
 }
 
 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 			       struct drm_device *dev,
 			       struct drm_mode_create_dumb *args)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem *exynos_gem;
+	unsigned int flags;
 	int ret;
 
+	ret = drm_mode_size_dumb(dev, args, 0, 0);
+	if (ret)
+		return ret;
+
 	/*
-	 * alocate memory to be used for framebuffer.
+	 * allocate memory to be used for framebuffer.
 	 * - this callback would be called by user application
 	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
 	 */
 
-	args->pitch = args->width * ((args->bpp + 7) / 8);
-	args->size = args->pitch * args->height;
+	if (is_drm_iommu_supported(dev))
+		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+	else
+		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
-						EXYNOS_BO_WC, args->size);
-	if (IS_ERR(exynos_gem_obj))
-		return PTR_ERR(exynos_gem_obj);
+	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
+	if (IS_ERR(exynos_gem)) {
+		dev_warn(dev->dev, "FB allocation failed.\n");
+		return PTR_ERR(exynos_gem);
+	}
 
-	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
-			&args->handle);
+	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
+					   &args->handle);
 	if (ret) {
-		exynos_drm_gem_destroy(exynos_gem_obj);
+		exynos_drm_gem_destroy(exynos_gem);
 		return ret;
 	}
 
 	return 0;
 }
 
-int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
-				   struct drm_device *dev, uint32_t handle,
-				   uint64_t *offset)
+static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
-	struct drm_gem_object *obj;
-	int ret = 0;
+	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	if (obj->import_attach)
+		return dma_buf_mmap(obj->dma_buf, vma, 0);
 
-	/*
-	 * get offset of memory allocated for drm framebuffer.
-	 * - this callback would be called by user application
-	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
-	 */
+	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		ret = -EINVAL;
-		goto unlock;
-	}
+	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
+			  exynos_gem->flags);
 
-	if (!obj->map_list.map) {
-		ret = drm_gem_create_mmap_offset(obj);
-		if (ret)
-			goto out;
-	}
+	/* non-cachable as default. */
+	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	else if (exynos_gem->flags & EXYNOS_BO_WC)
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 
-	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
-	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
+	if (ret)
+		goto err_close_vm;
 
-out:
-	drm_gem_object_unreference(obj);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
 	return ret;
-}
 
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-				struct drm_device *dev,
-				unsigned int handle)
-{
-	int ret;
+err_close_vm:
+	drm_gem_vm_close(vma);
 
-	/*
-	 * obj->refcount and obj->handle_count are decreased and
-	 * if both them are 0 then exynos_drm_gem_free_object()
-	 * would be called by callback to release resources.
-	 */
-	ret = drm_gem_handle_delete(file_priv, handle);
-	if (ret < 0) {
-		DRM_ERROR("failed to delete drm_gem_handle.\n");
-		return ret;
-	}
+	return ret;
+}
 
-	return 0;
+/* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf)
+{
+	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
 }
 
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
-	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	unsigned long f_vaddr;
-	pgoff_t page_offset;
+	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
+	struct drm_device *drm_dev = obj->dev;
+	struct sg_table *sgt;
 	int ret;
 
-	page_offset = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
-	f_vaddr = (unsigned long)vmf->virtual_address;
-
-	mutex_lock(&dev->struct_mutex);
-
-	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
-	if (ret < 0)
-		DRM_ERROR("failed to map a buffer with user.\n");
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
 
-	mutex_unlock(&dev->struct_mutex);
+	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+				    exynos_gem->dma_addr, exynos_gem->size,
+				    exynos_gem->dma_attrs);
+	if (ret) {
+		DRM_ERROR("failed to get sgtable, %d\n", ret);
+		kfree(sgt);
+		return ERR_PTR(ret);
+	}
 
-	return convert_to_vm_err_msg(ret);
+	return sgt;
 }
 
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct drm_gem_object *obj;
-	int ret;
-
-	/* set vm_area_struct. */
-	ret = drm_gem_mmap(filp, vma);
-	if (ret < 0) {
-		DRM_ERROR("failed to mmap.\n");
-		return ret;
-	}
+	struct exynos_drm_gem *exynos_gem;
 
-	obj = vma->vm_private_data;
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	ret = check_gem_flags(exynos_gem_obj->flags);
-	if (ret) {
-		drm_gem_vm_close(vma);
-		drm_gem_free_mmap_offset(obj);
-		return ret;
+	/* check if the entries in the sg_table are contiguous */
+	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
+		DRM_ERROR("buffer chunks must be mapped contiguously");
+		return ERR_PTR(-EINVAL);
 	}
 
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
+	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
+	if (IS_ERR(exynos_gem))
+		return ERR_CAST(exynos_gem);
 
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	/*
+	 * Buffer has been mapped as contiguous into DMA address space,
+	 * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
+	 * We assume a simplified logic below:
+	 */
+	if (is_drm_iommu_supported(dev))
+		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
+	else
+		exynos_gem->flags |= EXYNOS_BO_CONTIG;
 
-	return ret;
+	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+	exynos_gem->sgt = sgt;
+	return &exynos_gem->base;
 }