Diffstat (limited to 'include/drm/drm_gem_shmem_helper.h')
-rw-r--r--	include/drm/drm_gem_shmem_helper.h	57
1 file changed, 37 insertions, 20 deletions
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index bf0c31aa8fbe..b4f993da3cae 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -37,7 +37,18 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ refcount_t pages_use_count;
+
+ /**
+ * @pages_pin_count:
+ *
+ * Reference count on the pinned pages table.
+ *
+ * Pages are hard-pinned and reside in memory if the count is
+ * greater than zero. Otherwise, when the count is zero, the pages
+ * are allowed to be evicted and purged by the memory shrinker.
+ */
+ refcount_t pages_pin_count;
/**
* @madv: State for madvise
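The counters converted in this hunk follow the standard refcount_t get/put idiom, which adds saturation on overflow and a warning on underflow that a bare unsigned int cannot provide. A minimal sketch of that idiom, assuming hypothetical driver-side helpers (example_get_pages()/example_put_pages() are made-up names, not part of this header):

#include <linux/err.h>
#include <linux/refcount.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

/* Illustrative only: the refcount_t get/put pattern the new counters enable. */
static int example_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct page **pages;

	/* Fast path: pages already allocated, just take another reference. */
	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(&shmem->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	shmem->pages = pages;
	refcount_set(&shmem->pages_use_count, 1); /* publish the first reference */

	return 0;
}

static void example_put_pages(struct drm_gem_shmem_object *shmem)
{
	/* refcount_dec_and_test() returns true only for the final reference. */
	if (refcount_dec_and_test(&shmem->pages_use_count)) {
		drm_gem_put_pages(&shmem->base, shmem->pages, false, false);
		shmem->pages = NULL;
	}
}

The real helpers additionally run under the object's dma-resv lock, which is what the _locked renames later in this patch make explicit.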
@@ -71,7 +82,7 @@ struct drm_gem_shmem_object {
* Reference count on the virtual address.
* The address is unmapped when the count reaches zero.
*/
- unsigned int vmap_use_count;
+ refcount_t vmap_use_count;
/**
* @pages_mark_dirty_on_put:
@@ -97,27 +108,33 @@ struct drm_gem_shmem_object {
container_of(obj, struct drm_gem_shmem_object, base)
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
+ size_t size,
+ struct vfsmount *gemfs);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
+
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
return (shmem->madv > 0) &&
- !shmem->vmap_use_count && shmem->sgt &&
- !shmem->base.dma_buf && !shmem->base.import_attach;
+ !refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
+ !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
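The _locked suffix introduced across the API spells out the locking contract: these variants expect the caller to already hold the GEM object's dma-resv lock, while drm_gem_shmem_pin()/unpin() above remain available for callers that do not hold it. drm_gem_shmem_create_with_mnt() additionally lets a driver allocate from its own tmpfs mount instead of the default shmem mount. A hedged usage sketch of the locked pin path (my_driver_pin()/my_driver_unpin() are made-up callers):

#include <linux/dma-resv.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical caller: pin the backing pages with the resv lock held. */
static int my_driver_pin(struct drm_gem_shmem_object *shmem)
{
	int ret;

	/* With a NULL ww_acquire_ctx, dma_resv_lock() cannot fail. */
	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void my_driver_unpin(struct drm_gem_shmem_object *shmem)
{
	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}

Note also that drm_gem_shmem_is_purgeable() now tests pages_pin_count rather than vmap_use_count: an object is a shrinker candidate only while nothing holds a hard pin on its pages.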
@@ -173,7 +190,7 @@ static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- return drm_gem_shmem_pin(shmem);
+ return drm_gem_shmem_pin_locked(shmem);
}
/**
@@ -187,7 +204,7 @@ static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_unpin(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
}
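Both wrappers can now call the _locked variants directly because the DRM core acquires the object's dma-resv lock before invoking the &drm_gem_object_funcs.pin and .unpin callbacks. Drivers typically wire the wrappers up verbatim; a sketch of such a table (my_gem_funcs is an illustrative name, and a real driver may not need every entry):

#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

/* Illustrative: plugging the shmem wrappers into a driver's funcs table. */
static const struct drm_gem_object_funcs my_gem_funcs = {
	.free		= drm_gem_shmem_object_free,
	.print_info	= drm_gem_shmem_object_print_info,
	.pin		= drm_gem_shmem_object_pin,
	.unpin		= drm_gem_shmem_object_unpin,
	.get_sg_table	= drm_gem_shmem_object_get_sg_table,
	.vmap		= drm_gem_shmem_object_vmap,
	.vunmap		= drm_gem_shmem_object_vunmap,
	.mmap		= drm_gem_shmem_object_mmap,
	.vm_ops		= &drm_gem_shmem_vm_ops,
};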
/**
@@ -208,12 +225,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
}
/**
- * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
* @obj: GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
*
- * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vmap handler.
+ * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -223,7 +240,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- return drm_gem_shmem_vmap(shmem, map);
+ return drm_gem_shmem_vmap_locked(shmem, map);
}
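Because the mapping is returned through a struct iosys_map, callers manipulate it with the iosys_map_*() accessors rather than a raw pointer. A hedged example of a CPU write through the renamed vmap/vunmap pair, with the caller taking the resv lock (my_driver_cpu_write() is a made-up name):

#include <linux/dma-resv.h>
#include <linux/iosys-map.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical: copy a buffer into the shmem object's backing store. */
static int my_driver_cpu_write(struct drm_gem_shmem_object *shmem,
			       const void *src, size_t len)
{
	struct iosys_map map;
	int ret;

	dma_resv_lock(shmem->base.resv, NULL);

	ret = drm_gem_shmem_vmap_locked(shmem, &map);
	if (!ret) {
		iosys_map_memcpy_to(&map, 0, src, len);
		drm_gem_shmem_vunmap_locked(shmem, &map);
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}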
/**
@@ -231,15 +248,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
* @obj: GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
- * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vunmap handler.
+ * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
*/
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_vunmap(shmem, map);
+ drm_gem_shmem_vunmap_locked(shmem, map);
}
/**