Diffstat (limited to 'include/drm/drm_gem_shmem_helper.h')
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 113
1 file changed, 60 insertions(+), 53 deletions(-)
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index a2201b2488c5..589f7bfe7506 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -27,11 +27,6 @@ struct drm_gem_shmem_object {
 	struct drm_gem_object base;
 
 	/**
-	 * @pages_lock: Protects the page table and use count
-	 */
-	struct mutex pages_lock;
-
-	/**
 	 * @pages: Page table
 	 */
 	struct page **pages;
@@ -42,7 +37,18 @@ struct drm_gem_shmem_object {
 	 * Reference count on the pages table.
	 * The pages are put when the count reaches zero.
 	 */
-	unsigned int pages_use_count;
+	refcount_t pages_use_count;
+
+	/**
+	 * @pages_pin_count:
+	 *
+	 * Reference count on the pinned pages table.
+	 *
+	 * Pages are hard-pinned and reside in memory if count
+	 * greater than zero. Otherwise, when count is zero, the pages are
+	 * allowed to be evicted and purged by memory shrinker.
+	 */
+	refcount_t pages_pin_count;
 
 	/**
 	 * @madv: State for madvise
@@ -61,30 +67,11 @@ struct drm_gem_shmem_object {
 	struct list_head madv_list;
 
 	/**
-	 * @pages_mark_dirty_on_put:
-	 *
-	 * Mark pages as dirty when they are put.
-	 */
-	unsigned int pages_mark_dirty_on_put : 1;
-
-	/**
-	 * @pages_mark_accessed_on_put:
-	 *
-	 * Mark pages as accessed when they are put.
-	 */
-	unsigned int pages_mark_accessed_on_put : 1;
-
-	/**
 	 * @sgt: Scatter/gather table for imported PRIME buffers
 	 */
 	struct sg_table *sgt;
 
 	/**
-	 * @vmap_lock: Protects the vmap address and use count
-	 */
-	struct mutex vmap_lock;
-
-	/**
 	 * @vaddr: Kernel virtual address of the backing memory
 	 */
 	void *vaddr;
@@ -95,41 +82,61 @@ struct drm_gem_shmem_object {
 	 * Reference count on the virtual address.
 	 * The address are un-mapped when the count reaches zero.
 	 */
-	unsigned int vmap_use_count;
+	refcount_t vmap_use_count;
+
+	/**
+	 * @pages_mark_dirty_on_put:
+	 *
+	 * Mark pages as dirty when they are put.
+	 */
+	bool pages_mark_dirty_on_put : 1;
+
+	/**
+	 * @pages_mark_accessed_on_put:
+	 *
+	 * Mark pages as accessed when they are put.
+	 */
+	bool pages_mark_accessed_on_put : 1;
 
 	/**
 	 * @map_wc: map object write-combined (instead of using shmem defaults).
 	 */
-	bool map_wc;
+	bool map_wc : 1;
 };
 
 #define to_drm_gem_shmem_obj(obj) \
 	container_of(obj, struct drm_gem_shmem_object, base)
 
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
+							   size_t size,
+							   struct vfsmount *gemfs);
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
 
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
-		       struct iosys_map *map);
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-			  struct iosys_map *map);
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+			      struct iosys_map *map);
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+				 struct iosys_map *map);
 int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
 
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
+
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);
 
 static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
 {
 	return (shmem->madv > 0) &&
-		!shmem->vmap_use_count && shmem->sgt &&
-		!shmem->base.dma_buf && !shmem->base.import_attach;
+		!refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
+		!shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
 }
 
 void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
 
 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -185,7 +192,7 @@ static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	return drm_gem_shmem_pin(shmem);
+	return drm_gem_shmem_pin_locked(shmem);
 }
 
 /**
@@ -199,7 +206,7 @@ static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	drm_gem_shmem_unpin(shmem);
+	drm_gem_shmem_unpin_locked(shmem);
 }
 
 /**
@@ -220,12 +227,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
 }
 
 /*
- * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
  * @obj: GEM object
  * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
  *
- * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vmap handler.
+ * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
@@ -235,7 +242,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	return drm_gem_shmem_vmap(shmem, map);
+	return drm_gem_shmem_vmap_locked(shmem, map);
 }
 
 /*
@@ -243,15 +250,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
  * @obj: GEM object
  * @map: Kernel virtual address where the SHMEM GEM object was mapped
  *
- * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vunmap handler.
+ * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
  */
 static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
 					       struct iosys_map *map)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	drm_gem_shmem_vunmap(shmem, map);
+	drm_gem_shmem_vunmap_locked(shmem, map);
 }
 
 /**
@@ -282,18 +289,18 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
 					    struct sg_table *sgt);
 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			      struct drm_mode_create_dumb *args);
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+							 struct dma_buf *buf);
 
 /**
  * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
  *
- * This macro provides a shortcut for setting the shmem GEM operations in
- * the &drm_driver structure.
+ * This macro provides a shortcut for setting the shmem GEM operations
+ * in the &drm_driver structure. Drivers that do not require an s/g table
+ * for imported buffers should use this.
  */
 #define DRM_GEM_SHMEM_DRIVER_OPS \
-	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
-	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
-	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
-	.gem_prime_mmap		= drm_gem_prime_mmap, \
-	.dumb_create		= drm_gem_shmem_dumb_create
+	.gem_prime_import	= drm_gem_shmem_prime_import_no_map, \
+	.dumb_create		= drm_gem_shmem_dumb_create
 
 #endif /* __DRM_GEM_SHMEM_HELPER_H__ */
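
Usage sketch (not part of the diff above): with DRM_GEM_SHMEM_DRIVER_OPS now reduced to .gem_prime_import and .dumb_create, a driver built on the shmem helpers would pull the macro into its &drm_driver roughly as below. The driver name, fops symbol and feature flags are hypothetical placeholders, not taken from this patch.

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical example driver; only shows where the macro plugs in. */
DEFINE_DRM_GEM_FOPS(example_shmem_fops);

static const struct drm_driver example_shmem_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &example_shmem_fops,
	/* Expands to .gem_prime_import and .dumb_create per the final hunk. */
	DRM_GEM_SHMEM_DRIVER_OPS,
	.name			= "example-shmem",
	.desc			= "example shmem-backed driver",
};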
