Diffstat (limited to 'drivers/gpu/drm/tegra/gem.c')
 drivers/gpu/drm/tegra/gem.c | 308
 1 file changed, 202 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 26af8daa9a16..6b14f1e919eb 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -12,94 +12,110 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
-#include <drm/tegra_drm.h>
#include "drm.h"
#include "gem.h"
-static void tegra_bo_put(struct host1x_bo *bo)
+MODULE_IMPORT_NS("DMA_BUF");
+
+static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
- struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ dma_addr_t next = ~(dma_addr_t)0;
+ unsigned int count = 0, i;
+ struct scatterlist *s;
+
+ for_each_sg(sgl, s, nents, i) {
+ /* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
+ if (!sg_dma_len(s))
+ continue;
+
+ if (sg_dma_address(s) != next) {
+ next = sg_dma_address(s) + sg_dma_len(s);
+ count++;
+ }
+ }
- drm_gem_object_put(&obj->gem);
+ return count;
}
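
The helper above merges adjacent scatterlist entries when counting: a short sketch of the semantics, with invented addresses purely for illustration:

/*
 * Hypothetical mapped table (addresses invented for illustration):
 *
 *   entry 0: sg_dma_address = 0x80000000, sg_dma_len = 0x10000
 *   entry 1: sg_dma_address = 0x80010000, sg_dma_len = 0x10000  <- abuts entry 0
 *   entry 2: sg_dma_address = 0x90000000, sg_dma_len = 0x04000  <- new chunk
 *
 * sg_dma_count_chunks(sgl, 3) returns 2: entry 1 starts exactly where
 * entry 0 ends, so both fall into one contiguous DMA chunk.
 */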
-/* XXX move this into lib/scatterlist.c? */
-static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
- unsigned int nents, gfp_t gfp_mask)
+static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
- struct scatterlist *dst;
- unsigned int i;
- int err;
-
- err = sg_alloc_table(sgt, nents, gfp_mask);
- if (err < 0)
- return err;
-
- dst = sgt->sgl;
+ return sg_dma_count_chunks(sgt->sgl, sgt->nents);
+}
- for (i = 0; i < nents; i++) {
- sg_set_page(dst, sg_page(sg), sg->length, 0);
- dst = sg_next(dst);
- sg = sg_next(sg);
- }
+static void tegra_bo_put(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return 0;
+ drm_gem_object_put(&obj->gem);
}
-static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
- dma_addr_t *phys)
+static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction direction)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- struct sg_table *sgt;
+ struct drm_gem_object *gem = &obj->gem;
+ struct host1x_bo_mapping *map;
int err;
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
/*
- * If we've manually mapped the buffer object through the IOMMU, make
- * sure to return the IOVA address of our mapping.
- *
- * Similarly, for buffers that have been allocated by the DMA API the
- * physical address can be used for devices that are not attached to
- * an IOMMU. For these devices, callers must pass a valid pointer via
- * the @phys argument.
- *
- * Imported buffers were also already mapped at import time, so the
- * existing mapping can be reused.
+ * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
*/
- if (phys) {
- *phys = obj->iova;
- return NULL;
+ if (obj->dma_buf) {
+ struct dma_buf *buf = obj->dma_buf;
+
+ map->attach = dma_buf_attach(buf, dev);
+ if (IS_ERR(map->attach)) {
+ err = PTR_ERR(map->attach);
+ goto free;
+ }
+
+ map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
+ if (IS_ERR(map->sgt)) {
+ dma_buf_detach(buf, map->attach);
+ err = PTR_ERR(map->sgt);
+ map->sgt = NULL;
+ goto free;
+ }
+
+ err = sgt_dma_count_chunks(map->sgt);
+ map->size = gem->size;
+
+ goto out;
}
/*
* If we don't have a mapping for this buffer yet, return an SG table
* so that host1x can do the mapping for us via the DMA API.
*/
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- return ERR_PTR(-ENOMEM);
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
+ }
if (obj->pages) {
/*
* If the buffer object was allocated from the explicit IOMMU
* API code paths, construct an SG table from the pages.
*/
- err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
- 0, obj->gem.size, GFP_KERNEL);
- if (err < 0)
- goto free;
- } else if (obj->sgt) {
- /*
- * If the buffer object already has an SG table but no pages
- * were allocated for it, it means the buffer was imported and
- * the SG table needs to be copied to avoid overwriting any
- * other potential users of the original SG table.
- */
- err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
- obj->sgt->orig_nents, GFP_KERNEL);
+ err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
+ GFP_KERNEL);
if (err < 0)
goto free;
} else {
@@ -108,55 +124,94 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
* not imported, it had to be allocated with the DMA API, so
* the DMA API helper can be used.
*/
- err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
- obj->gem.size);
+ err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
if (err < 0)
goto free;
}
- return sgt;
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+out:
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make sure to return the
+ * existing IOVA address of our mapping.
+ */
+ if (!obj->mm) {
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->chunks = err;
+ } else {
+ map->phys = obj->iova;
+ map->chunks = 1;
+ }
+
+ map->size = gem->size;
+
+ return map;
+free_sgt:
+ sg_free_table(map->sgt);
free:
- kfree(sgt);
+ kfree(map->sgt);
+ kfree(map);
return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
- if (sgt) {
- sg_free_table(sgt);
- kfree(sgt);
+ if (map->attach) {
+ dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
+ map->direction);
+ dma_buf_detach(map->attach->dmabuf, map->attach);
+ } else {
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
}
+
+ host1x_bo_put(map->bo);
+ kfree(map);
}
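
For context when reviewing the new callbacks: a minimal sketch of the consumer side, assuming host1x_bo_pin()/host1x_bo_unpin() wrappers that dispatch to the ops above (the pin wrapper's signature is assumed here, not shown in this diff):

/* Sketch only: the host1x_bo_pin() signature is assumed from context. */
struct host1x_bo_mapping *map;

map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, NULL);
if (IS_ERR(map))
	return PTR_ERR(map);

/* map->phys is the device-visible base address and map->chunks the
 * number of contiguous DMA ranges; the mapping holds a reference on
 * the underlying buffer object until it is dropped. */

host1x_bo_unpin(map);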
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- struct dma_buf_map map;
+ struct iosys_map map = { 0 };
+ void *vaddr;
int ret;
- if (obj->vaddr) {
+ if (obj->vaddr)
return obj->vaddr;
- } else if (obj->gem.import_attach) {
- ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
- return ret ? NULL : map.vaddr;
- } else {
- return vmap(obj->pages, obj->num_pages, VM_MAP,
- pgprot_writecombine(PAGE_KERNEL));
+
+ if (obj->dma_buf) {
+ ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return map.vaddr;
}
+
+ vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);
if (obj->vaddr)
return;
- else if (obj->gem.import_attach)
- dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
- else
- vunmap(addr);
+
+ if (obj->dma_buf)
+ return dma_buf_vunmap_unlocked(obj->dma_buf, &map);
+
+ vunmap(addr);
}
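
One behavioral change worth calling out: tegra_bo_mmap() now returns ERR_PTR() codes instead of NULL on failure, so callers switch from NULL checks to IS_ERR(), as tegra_gem_prime_vmap() further down does:

/* Illustrative caller, mirroring tegra_gem_prime_vmap() below: */
void *vaddr = tegra_bo_mmap(&bo->base);
if (IS_ERR(vaddr))
	return PTR_ERR(vaddr);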
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
@@ -410,33 +465,38 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
if (IS_ERR(bo))
return bo;
- attach = dma_buf_attach(buf, drm->dev);
- if (IS_ERR(attach)) {
- err = PTR_ERR(attach);
- goto free;
- }
-
- get_dma_buf(buf);
+ /*
+ * If we need to use IOMMU API to map the dma-buf into the internally managed
+ * domain, map it first to the DRM device to get an sgt.
+ */
+ if (tegra->domain) {
+ attach = dma_buf_attach(buf, drm->dev);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto free;
+ }
- bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
- if (IS_ERR(bo->sgt)) {
- err = PTR_ERR(bo->sgt);
- goto detach;
- }
+ bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
+ goto detach;
+ }
- if (tegra->domain) {
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0)
goto detach;
+
+ bo->gem.import_attach = attach;
}
- bo->gem.import_attach = attach;
+ get_dma_buf(buf);
+ bo->dma_buf = buf;
return bo;
detach:
if (!IS_ERR_OR_NULL(bo->sgt))
- dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+ dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
dma_buf_detach(buf, attach);
dma_buf_put(buf);
@@ -449,19 +509,33 @@ free:
void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_drm *tegra = gem->dev->dev_private;
+ struct host1x_bo_mapping *mapping, *tmp;
struct tegra_bo *bo = to_tegra_bo(gem);
- if (tegra->domain)
+ /* remove all mappings of this buffer object from any caches */
+ list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
+ if (mapping->cache)
+ host1x_bo_unpin(mapping);
+ else
+ dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
+ dev_name(mapping->dev));
+ }
+
+ if (tegra->domain) {
tegra_bo_iommu_unmap(tegra, bo);
- if (gem->import_attach) {
- dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
- DMA_TO_DEVICE);
- drm_prime_gem_destroy(gem, NULL);
- } else {
- tegra_bo_free(gem->dev, bo);
+ if (drm_gem_is_imported(gem)) {
+ dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
+ DMA_TO_DEVICE);
+ dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
+ }
}
+ tegra_bo_free(gem->dev, bo);
+
+ if (bo->dma_buf)
+ dma_buf_put(bo->dma_buf);
+
drm_gem_object_release(gem);
kfree(bo);
}
@@ -469,12 +543,13 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
+ int ret;
- args->pitch = round_up(min_pitch, tegra->pitch_align);
- args->size = args->pitch * args->height;
+ ret = drm_mode_size_dumb(drm, args, tegra->pitch_align, 0);
+ if (ret)
+ return ret;
bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
&args->handle);
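
Assuming drm_mode_size_dumb() preserves the semantics of the removed open-coded math, the computation is unchanged; for example, for a 1024x768 buffer at 32 bpp with pitch_align = 64:

/* Equivalent to the removed lines above (values for illustration):
 *   min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096
 *   pitch     = round_up(4096, 64)         = 4096
 *   size      = 4096 * 768                 = 3145728 bytes
 */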
@@ -520,7 +595,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
- vma->vm_flags &= ~VM_PFNMAP;
+ vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
@@ -534,8 +609,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
} else {
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(prot);
}
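
The vm_flags conversion is mechanical; it folds both open-coded updates into one accessor call:

/* vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP) sets VM_MIXEDMAP and clears
 * VM_PFNMAP, i.e. it replaces the removed:
 *   vma->vm_flags |= VM_MIXEDMAP;
 *   vma->vm_flags &= ~VM_PFNMAP;
 * but goes through the vm_flags accessors instead of writing the field
 * directly.
 */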
@@ -647,18 +721,27 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
return __tegra_gem_mmap(gem, vma);
}
-static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
+static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
+ void *vaddr;
- dma_buf_map_set_vaddr(map, bo->vaddr);
+ vaddr = tegra_bo_mmap(&bo->base);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ iosys_map_set_vaddr(map, vaddr);
return 0;
}
-static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
+static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ tegra_bo_munmap(&bo->base, map->vaddr);
}
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
@@ -707,3 +790,16 @@ struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
return &bo->gem;
}
+
+struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(file, handle);
+ if (!gem)
+ return NULL;
+
+ bo = to_tegra_bo(gem);
+ return &bo->base;
+}
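
A minimal usage sketch for the new helper (the caller and its error handling are illustrative, not part of this diff):

/* Illustrative caller: resolve a userspace handle to a host1x_bo.
 * drm_gem_object_lookup() takes a reference, which tegra_bo_put()
 * releases once the caller drops it via host1x_bo_put(). */
struct host1x_bo *bo = tegra_gem_lookup(file, handle);
if (!bo)
	return -ENOENT;

/* ... use the buffer ... */

host1x_bo_put(bo);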