path: root/drivers/gpu/drm/drm_gem_shmem_helper.c
author	Rob Herring <robh@kernel.org>	2019-08-05 08:33:57 -0600
committer	Rob Herring <robh@kernel.org>	2019-08-08 15:54:10 -0600
commit	17acb9f35ed736c177f85b8ee711f278e7aff421 (patch)
tree	d888de0d99ed6638215fec5dcc7073ca25b9ea77	/drivers/gpu/drm/drm_gem_shmem_helper.c
parent	3551a9fa2c5d3f3b0a34af67d33a0f2a5fecd94c (diff)
drm/shmem: Add madvise state and purge helpers
Add support to the shmem GEM helpers for tracking madvise state and
purging pages. This is based on the msm implementation. The BO provides
a list_head, but the list management is handled outside of the shmem
helpers as there are different locking requirements.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <maxime.ripard@bootlin.com>
Cc: Sean Paul <sean@poorly.run>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Eric Anholt <eric@anholt.net>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190805143358.21245-1-robh@kernel.org
Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
-rw-r--r--	drivers/gpu/drm/drm_gem_shmem_helper.c	57
1 file changed, 57 insertions, 0 deletions
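To make the intended usage concrete, here is a rough sketch of the driver side this patch expects, loosely following the msm-style flow the commit message refers to. Everything named foo_* and the FOO_MADV_* values are assumptions for illustration; only drm_gem_shmem_madvise(), to_drm_gem_shmem_obj() and the madv_list member come from the shmem helpers.

/* Hypothetical driver-private state; not part of this patch. */
struct foo_device {
	struct mutex shrinker_lock;	/* protects shrinker_list */
	struct list_head shrinker_list;	/* BOs marked DONTNEED */
	struct shrinker shrinker;
};

#define FOO_MADV_WILLNEED 0
#define FOO_MADV_DONTNEED 1

static int foo_gem_madvise(struct foo_device *fdev,
			   struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int retained;

	/* Record the new state under the helper's pages_lock. */
	retained = drm_gem_shmem_madvise(obj, madv);

	/*
	 * List management stays in the driver because the list lock
	 * has driver-specific ordering requirements.
	 */
	mutex_lock(&fdev->shrinker_lock);
	if (retained && madv == FOO_MADV_DONTNEED)
		list_add_tail(&shmem->madv_list, &fdev->shrinker_list);
	else if (madv == FOO_MADV_WILLNEED)
		list_del_init(&shmem->madv_list);
	mutex_unlock(&fdev->shrinker_lock);

	return retained;
}

A driver's madvise ioctl handler would translate its userspace argument into one of these values before calling the helper; see the shrinker sketch after the diff for the reclaim side.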
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 2f64667ac805..4b442576de1c 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -75,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
+	INIT_LIST_HEAD(&shmem->madv_list);

	/*
	 * Our buffers are kept pinned, so allocating them
@@ -362,6 +363,62 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
+/* Update madvise status; returns true if the BO has not been purged,
+ * else false.
+ */
+int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	mutex_lock(&shmem->pages_lock);
+
+	if (shmem->madv >= 0)
+		shmem->madv = madv;
+
+	madv = shmem->madv;
+
+	mutex_unlock(&shmem->pages_lock);
+
+	return (madv >= 0);
+}
+EXPORT_SYMBOL(drm_gem_shmem_madvise);
+
+void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+
+	drm_gem_shmem_put_pages_locked(shmem);
+
+	shmem->madv = -1;
+
+	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+	drm_gem_free_mmap_offset(obj);
+
+	/* Our goal here is to return as much of the memory as
+	 * is possible back to the system as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*.
+	 */
+	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+			0, (loff_t)-1);
+}
+EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
+
+void drm_gem_shmem_purge(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	mutex_lock(&shmem->pages_lock);
+	drm_gem_shmem_purge_locked(obj);
+	mutex_unlock(&shmem->pages_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_purge);
+
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
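On the reclaim side, a driver shrinker's scan callback might then walk the driver-maintained list and use the new purge helpers. This too is only a sketch under the same foo_* assumptions as above; only drm_gem_shmem_is_purgeable() and drm_gem_shmem_purge() are taken from this patch.

/* Hypothetical shrinker scan callback; assumed driver code except for
 * the drm_gem_shmem_* helpers added by this patch.
 */
static unsigned long
foo_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct foo_device *fdev =
		container_of(shrinker, struct foo_device, shrinker);
	struct drm_gem_shmem_object *shmem, *tmp;
	unsigned long freed = 0;

	if (!mutex_trylock(&fdev->shrinker_lock))
		return SHRINK_STOP;

	list_for_each_entry_safe(shmem, tmp, &fdev->shrinker_list, madv_list) {
		if (freed >= sc->nr_to_scan)
			break;
		if (drm_gem_shmem_is_purgeable(shmem)) {
			/* Takes pages_lock, drops the backing pages and
			 * marks the BO purged (madv = -1).
			 */
			drm_gem_shmem_purge(&shmem->base);
			freed += shmem->base.size >> PAGE_SHIFT;
			list_del_init(&shmem->madv_list);
		}
	}

	mutex_unlock(&fdev->shrinker_lock);

	return freed;
}

The lock ordering between the driver's list lock and the helper's pages_lock is the driver's problem, which is exactly why the commit keeps list management out of the shmem helpers.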