author    Rob Herring <robh@kernel.org>    2019-08-13 09:01:15 -0600
committer Rob Herring <robh@kernel.org>    2019-08-19 11:34:57 -0500
commit    7282f7645d06bf0afe0a3c11ab92d9392528b819 (patch)
tree      9b4214f8f244950fb1d32b8d58b05d446ef5373b /drivers/gpu/drm/panfrost/panfrost_gem.c
parent    3efdf83ca0f9d3149f8c2201dad86a74fd952f91 (diff)
drm/panfrost: Implement per FD address spaces
Up until now, a single shared GPU address space was used. This is not ideal as there's no protection between processes and it doesn't work for supporting the same GPU/CPU VA feature. Most importantly, this will hopefully mitigate Alyssa's fear of WebGL, whatever that is.

Most of the changes here are moving struct drm_mm and struct panfrost_mmu objects from the per device struct to the per FD struct. The critical function is panfrost_mmu_as_get(), which handles allocating and switching the h/w address spaces.

There are 3 states an AS can be in: free, allocated, and in use. When a job runs, it requests an address space and then marks it not in use when the job is complete (but it stays assigned). The first time through, we find a free AS in the alloc_mask and assign the AS to the FD. The next time through, we most likely already have our AS and we just mark it in use with a ref count. We need a ref count because we have multiple job slots. If the job/FD doesn't have an AS assigned and there are no free ones, we pick an allocated one not in use from our LRU list and switch the AS from the old FD to the new one.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Steven Price <steven.price@arm.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190813150115.30338-1-robh@kernel.org
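To make the AS lifecycle described in the commit message easier to follow, below is a standalone userspace C sketch of the same free / allocated / in-use state machine with LRU stealing. All names in it (fd_ctx, as_slot, as_get(), as_put(), NUM_AS) are hypothetical stand-ins for illustration; this is not the actual panfrost_mmu_as_get() code, which additionally has to program the h/w MMU and handle locking.

/*
 * Sketch only: models the per-FD address-space assignment described above.
 * A h/w AS is free, allocated-but-idle, or in use; idle ones are stolen
 * by LRU when nothing is free. Not the real panfrost implementation.
 */
#include <stdio.h>

#define NUM_AS 8                  /* number of h/w address spaces (assumed) */

struct fd_ctx;                    /* stands in for a per-FD driver context */

struct as_slot {
	struct fd_ctx *owner;     /* NULL => free */
	int refcount;             /* >0 => in use by one or more job slots */
	unsigned long idle_stamp; /* LRU ordering for idle (refcount == 0) slots */
};

struct fd_ctx {
	int as;                   /* assigned AS index, or -1 if none */
	const char *name;
};

static struct as_slot slots[NUM_AS];
static unsigned long lru_clock;

/* Request an AS for a job: reuse the FD's AS, take a free one, or steal by LRU. */
static int as_get(struct fd_ctx *ctx)
{
	int i, victim = -1;

	/* Common case: the FD already owns an AS; just take a reference. */
	if (ctx->as >= 0) {
		slots[ctx->as].refcount++;
		return ctx->as;
	}

	/* First time through: look for a free AS. */
	for (i = 0; i < NUM_AS; i++) {
		if (!slots[i].owner) {
			victim = i;
			break;
		}
	}

	/* None free: evict the least recently used idle AS from its old FD. */
	if (victim < 0) {
		unsigned long oldest = (unsigned long)-1;

		for (i = 0; i < NUM_AS; i++) {
			if (slots[i].refcount == 0 && slots[i].idle_stamp < oldest) {
				oldest = slots[i].idle_stamp;
				victim = i;
			}
		}
		if (victim < 0)
			return -1;                /* every AS is in use: caller must wait */
		slots[victim].owner->as = -1;     /* detach the AS from the old FD */
	}

	slots[victim].owner = ctx;
	slots[victim].refcount = 1;
	ctx->as = victim;
	return victim;
}

/* Job completed: drop the reference; the AS stays assigned but becomes idle. */
static void as_put(struct fd_ctx *ctx)
{
	struct as_slot *s = &slots[ctx->as];

	if (--s->refcount == 0)
		s->idle_stamp = ++lru_clock;      /* now eligible for LRU reclaim */
}

int main(void)
{
	struct fd_ctx a = { .as = -1, .name = "fd-a" };
	struct fd_ctx b = { .as = -1, .name = "fd-b" };

	printf("%s got AS %d\n", a.name, as_get(&a)); /* first job: grabs a free AS */
	printf("%s got AS %d\n", a.name, as_get(&a)); /* second job slot: ref count only */
	as_put(&a);
	as_put(&a);                                   /* idle, but still assigned to fd-a */
	printf("%s got AS %d\n", b.name, as_get(&b)); /* different FD: gets its own AS */
	return 0;
}

The ref count exists because multiple job slots from the same FD can be running at once; only when it drops to zero does the AS become a candidate for reclaim by another FD, which is why an idle AS stays assigned and only moves FDs under LRU pressure.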
Diffstat (limited to 'drivers/gpu/drm/panfrost/panfrost_gem.c')
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index e71f27c4041e..e084bc4e9083 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -47,8 +47,8 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
size_t size = obj->size;
u64 align;
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
- struct panfrost_device *pfdev = obj->dev->dev_private;
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
/*
* Executable buffers cannot cross a 16MB boundary as the program
@@ -61,8 +61,9 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
- spin_lock(&pfdev->mm_lock);
- ret = drm_mm_insert_node_generic(&pfdev->mm, &bo->node,
+ bo->mmu = &priv->mmu;
+ spin_lock(&priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
size >> PAGE_SHIFT, align, color, 0);
if (ret)
goto out;
@@ -73,22 +74,22 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
drm_mm_remove_node(&bo->node);
}
out:
- spin_unlock(&pfdev->mm_lock);
+ spin_unlock(&priv->mm_lock);
return ret;
}
static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
- struct panfrost_device *pfdev = obj->dev->dev_private;
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
if (bo->is_mapped)
panfrost_mmu_unmap(bo);
- spin_lock(&pfdev->mm_lock);
+ spin_lock(&priv->mm_lock);
if (drm_mm_node_allocated(&bo->node))
drm_mm_remove_node(&bo->node);
- spin_unlock(&pfdev->mm_lock);
+ spin_unlock(&priv->mm_lock);
}
static int panfrost_gem_pin(struct drm_gem_object *obj)