Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_prime.c')
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 110
 1 file changed, 68 insertions, 42 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d5e5c0..caab60fc62f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,63 +22,66 @@
  * Authors: Dave Airlie
  */
 
-#include <drm/drmP.h>
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_tt.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_gem.h"
 
 struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-	int npages = nvbo->bo.num_pages;
 
-	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
-}
-
-void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
-{
-	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-	int ret;
-
-	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
-			  &nvbo->dma_buf_vmap);
-	if (ret)
-		return ERR_PTR(ret);
-
-	return nvbo->dma_buf_vmap.virtual;
-}
-
-void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
-	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
-	ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+	return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+				     nvbo->bo.ttm->num_pages);
 }
 
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
-							 size_t size,
+							 struct dma_buf_attachment *attach,
 							 struct sg_table *sg)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_gem_object *obj;
 	struct nouveau_bo *nvbo;
-	u32 flags = 0;
+	struct dma_resv *robj = attach->dmabuf->resv;
+	u64 size = attach->dmabuf->size;
+	int align = 0;
 	int ret;
 
-	flags = TTM_PL_FLAG_TT;
-
-	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
-			     sg, &nvbo);
-	if (ret)
-		return ERR_PTR(ret);
+	dma_resv_lock(robj, NULL);
+	nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
+				NOUVEAU_GEM_DOMAIN_GART, 0, 0, true);
+	if (IS_ERR(nvbo)) {
+		obj = ERR_CAST(nvbo);
+		goto unlock;
+	}
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
-		nouveau_bo_ref(NULL, &nvbo);
-		return ERR_PTR(-ENOMEM);
+
+	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
+	if (ret) {
+		drm_gem_object_release(&nvbo->bo.base);
+		kfree(nvbo);
+		obj = ERR_PTR(-ENOMEM);
+		goto unlock;
 	}
 
-	nvbo->gem->driver_private = nvbo;
-	return nvbo->gem;
+	ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
+			      sg, robj);
+	if (ret) {
+		obj = ERR_PTR(ret);
+		goto unlock;
+	}
+
+	obj = &nvbo->bo.base;
+
+unlock:
+	dma_resv_unlock(robj);
+	return obj;
 }
 
 int nouveau_gem_prime_pin(struct drm_gem_object *obj)
@@ -87,16 +90,39 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
 	int ret;
 
 	/* pin buffer into GTT */
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+	ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
 	if (ret)
-		return -EINVAL;
+		ret = -EINVAL;
 
-	return 0;
+	return ret;
 }
 
 void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
 {
 	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 
-	nouveau_bo_unpin(nvbo);
+	nouveau_bo_unpin_locked(nvbo);
+}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+					 int flags)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = true,
+		/* We opt to avoid OOM on system pages allocations */
+		.gfp_retry_mayfail = true,
+		.allow_res_evict = false,
+	};
+	int ret;
+
+	if (nvbo->no_share)
+		return ERR_PTR(-EPERM);
+
+	ret = ttm_bo_setup_export(&nvbo->bo, &ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return drm_gem_prime_export(gobj, flags);
 }
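
For context, here is a minimal sketch (not part of the diff above) of how a DRM driver typically wires up the PRIME import hook that this change reworks. The callback field comes from struct drm_driver in include/drm/drm_drv.h; the "foo" driver name and the surrounding structure are hypothetical and only mark where nouveau_gem_prime_import_sg_table() is reached from: DRM core maps the dma-buf attachment into an sg_table and then invokes this hook.

/* Sketch only: hypothetical "foo" driver showing where the reworked
 * import hook plugs in; nouveau itself wires the same callback from
 * its drm_driver in nouveau_drm.c.  Everything else is illustrative.
 */
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>

#include "nouveau_gem.h"	/* nouveau_gem_prime_import_sg_table() */

static const struct drm_driver foo_drm_driver = {
	.driver_features = DRIVER_GEM,
	/* DRM core hands the imported sg_table to this hook, which (after
	 * the diff above) locks the dma-buf's reservation object and
	 * returns the GEM object embedded in the new nouveau_bo. */
	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
	.name  = "foo",
	.desc  = "PRIME import wiring sketch",
	.major = 1,
	.minor = 0,
};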
