Diffstat (limited to 'drivers/gpu/drm/v3d/v3d_bo.c')
 -rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 97
 1 file changed, 85 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 8b3229a37c6d..d9547f5117b9 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -13,18 +13,27 @@
  * Display engines requiring physically contiguous allocations should
  * look into Mesa's "renderonly" support (as used by the Mesa pl111
  * driver) for an example of how to integrate with V3D.
- *
- * Long term, we should support evicting pages from the MMU when under
- * memory pressure (thus the v3d_bo_get_pages() refcounting), but
- * that's not a high priority since our systems tend to not have swap.
  */
 
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_print.h>
 
 #include "v3d_drv.h"
 #include "uapi/drm/v3d_drm.h"
 
+static enum drm_gem_object_status v3d_gem_status(struct drm_gem_object *obj)
+{
+	struct v3d_bo *bo = to_v3d_bo(obj);
+	enum drm_gem_object_status res = 0;
+
+	if (bo->base.pages)
+		res |= DRM_GEM_OBJECT_RESIDENT;
+
+	return res;
+}
+
 /* Called DRM core on the last userspace/kernel unreference of the
  * BO.
  */
@@ -33,11 +42,14 @@ void v3d_free_object(struct drm_gem_object *obj)
 	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
 	struct v3d_bo *bo = to_v3d_bo(obj);
 
+	if (bo->vaddr)
+		v3d_put_bo_vaddr(bo);
+
 	v3d_mmu_remove_ptes(bo);
 
 	mutex_lock(&v3d->bo_lock);
 	v3d->bo_stats.num_allocated--;
-	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
+	v3d->bo_stats.pages_allocated -= obj->size >> V3D_MMU_PAGE_SHIFT;
 	mutex_unlock(&v3d->bo_lock);
 
 	spin_lock(&v3d->mm_lock);
@@ -59,6 +71,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
 	.vmap = drm_gem_shmem_object_vmap,
 	.vunmap = drm_gem_shmem_object_vunmap,
 	.mmap = drm_gem_shmem_object_mmap,
+	.status = v3d_gem_status,
 	.vm_ops = &drm_gem_shmem_vm_ops,
 };
 
@@ -91,6 +104,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
 	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
 	struct v3d_bo *bo = to_v3d_bo(obj);
 	struct sg_table *sgt;
+	u64 align;
 	int ret;
 
 	/* So far we pin the BO in the MMU for its lifetime, so use
@@ -100,14 +114,23 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
+	if (!v3d->gemfs)
+		align = SZ_4K;
+	else if (obj->size >= SZ_1M)
+		align = SZ_1M;
+	else if (obj->size >= SZ_64K)
+		align = SZ_64K;
+	else
+		align = SZ_4K;
+
 	spin_lock(&v3d->mm_lock);
 	/* Allocate the object's space in the GPU's page tables.
 	 * Inserting PTEs will happen later, but the offset is for the
 	 * lifetime of the BO.
 	 */
 	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
-					 obj->size >> PAGE_SHIFT,
-					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+					 obj->size >> V3D_MMU_PAGE_SHIFT,
+					 align >> V3D_MMU_PAGE_SHIFT, 0, 0);
 	spin_unlock(&v3d->mm_lock);
 	if (ret)
 		return ret;
@@ -115,7 +138,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
 	/* Track stats for /debug/dri/n/bo_stats. */
 	mutex_lock(&v3d->bo_lock);
 	v3d->bo_stats.num_allocated++;
-	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+	v3d->bo_stats.pages_allocated += obj->size >> V3D_MMU_PAGE_SHIFT;
 	mutex_unlock(&v3d->bo_lock);
 
 	v3d_mmu_insert_ptes(bo);
@@ -127,13 +150,16 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
 			     size_t unaligned_size)
 {
 	struct drm_gem_shmem_object *shmem_obj;
+	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_bo *bo;
 	int ret;
 
-	shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+	shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size,
+						  v3d->gemfs);
 	if (IS_ERR(shmem_obj))
 		return ERR_CAST(shmem_obj);
 	bo = to_v3d_bo(&shmem_obj->base);
+	bo->vaddr = NULL;
 
 	ret = v3d_bo_create_finish(&shmem_obj->base);
 	if (ret)
@@ -167,6 +193,20 @@ v3d_prime_import_sg_table(struct drm_device *dev,
 	return obj;
 }
 
+void v3d_get_bo_vaddr(struct v3d_bo *bo)
+{
+	struct drm_gem_shmem_object *obj = &bo->base;
+
+	bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
+			 pgprot_writecombine(PAGE_KERNEL));
+}
+
+void v3d_put_bo_vaddr(struct v3d_bo *bo)
+{
+	vunmap(bo->vaddr);
+	bo->vaddr = NULL;
+}
+
 int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
@@ -183,7 +223,7 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	args->offset = bo->node.start << PAGE_SHIFT;
+	args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;
 
 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 	drm_gem_object_put(&bo->base.base);
@@ -228,8 +268,41 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
 	}
 	bo = to_v3d_bo(gem_obj);
 
-	args->offset = bo->node.start << PAGE_SHIFT;
+	args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;
 
 	drm_gem_object_put(gem_obj);
 	return 0;
 }
+
+int
+v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	int ret;
+	struct drm_v3d_wait_bo *args = data;
+	ktime_t start = ktime_get();
+	u64 delta_ns;
+	unsigned long timeout_jiffies =
+		nsecs_to_jiffies_timeout(args->timeout_ns);
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+				    true, timeout_jiffies);
+
+	/* Decrement the user's timeout, in case we got interrupted
+	 * such that the ioctl will be restarted.
+	 */
+	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+	if (delta_ns < args->timeout_ns)
+		args->timeout_ns -= delta_ns;
+	else
+		args->timeout_ns = 0;
+
+	/* Asked to wait beyond the jiffy/scheduler precision? */
+	if (ret == -ETIME && args->timeout_ns)
+		ret = -EAGAIN;
+
+	return ret;
+}
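
The v3d_wait_bo_ioctl() handler at the end of this diff implements DRM_IOCTL_V3D_WAIT_BO. A minimal userspace sketch of how a caller might use it follows; it is not part of the patch, and the fd/bo_handle values are assumed to come from an earlier device open and a DRM_IOCTL_V3D_CREATE_BO call. Because the kernel decrements args->timeout_ns on interruption and converts -ETIME into -EAGAIN while user time remains, the caller can simply restart the ioctl without ever waiting longer than originally requested (libdrm's drmIoctl() wrapper restarts on EINTR/EAGAIN the same way).

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include "drm/v3d_drm.h"

static int wait_bo(int fd, uint32_t bo_handle, uint64_t timeout_ns)
{
	struct drm_v3d_wait_bo wait = {
		.handle = bo_handle,
		.pad = 0,	/* must be zero, or the kernel returns -EINVAL */
		.timeout_ns = timeout_ns,
	};
	int ret;

	/* On interruption the kernel has already decremented
	 * wait.timeout_ns, so restarting never waits longer than the
	 * caller asked for; -ETIME means the timeout truly expired.
	 */
	do {
		ret = ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}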
