Diffstat (limited to 'include/drm/drm_gem.h')
-rw-r--r--  include/drm/drm_gem.h | 165
1 file changed, 108 insertions(+), 57 deletions(-)
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bc9f6aa2f3fe..8d48d2af2649 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,6 +35,7 @@
*/
#include <linux/kref.h>
+#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -48,19 +49,21 @@ struct drm_gem_object;
* enum drm_gem_object_status - bitmask of object state for fdinfo reporting
* @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned)
* @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
+ * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission
*
* Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
- * and drm_show_fdinfo(). Note that an object can DRM_GEM_OBJECT_PURGEABLE if
- * it still active or not resident, in which case drm_show_fdinfo() will not
+ * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE
+ * and be active or not resident, in which case drm_show_fdinfo() will not
* account for it as purgeable. So drivers do not need to check if the buffer
- * is idle and resident to return this bit. (Ie. userspace can mark a buffer
- * as purgeable even while it is still busy on the GPU.. it does not _actually_
- * become puregeable until it becomes idle. The status gem object func does
- * not need to consider this.)
+ * is idle and resident to return this bit, i.e. userspace can mark a buffer as
+ * purgeable even while it is still busy on the GPU. It will not get reported in
+ * the purgeable stats until it becomes idle. The status gem object func does
+ * not need to consider this.
*/
enum drm_gem_object_status {
DRM_GEM_OBJECT_RESIDENT = BIT(0),
DRM_GEM_OBJECT_PURGEABLE = BIT(1),
+ DRM_GEM_OBJECT_ACTIVE = BIT(2),
};
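For illustration, a driver's &drm_gem_object_funcs.status hook might combine these bits as in the sketch below. The my_bo wrapper, its pages field and its madv flag are hypothetical driver state, and the fence test is only one plausible busy check; the enum bits and dma_resv_test_signaled() are the only kernel API assumed here.

	static enum drm_gem_object_status my_gem_status(struct drm_gem_object *obj)
	{
		struct my_bo *bo = to_my_bo(obj);	/* hypothetical wrapper */
		enum drm_gem_object_status status = 0;

		if (bo->pages)				/* backing store present */
			status |= DRM_GEM_OBJECT_RESIDENT;
		if (bo->madv == MY_MADV_DONTNEED)	/* marked purgeable by userspace */
			status |= DRM_GEM_OBJECT_PURGEABLE;
		/* busy: an unsignalled fence is still attached to the object */
		if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
			status |= DRM_GEM_OBJECT_ACTIVE;

		return status;
	}

Per the comment above, returning DRM_GEM_OBJECT_PURGEABLE while the object is still active is fine; drm_show_fdinfo() resolves the combination.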
/**
@@ -123,7 +126,8 @@ struct drm_gem_object_funcs {
/**
* @pin:
*
- * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
+ * Pin backing buffer in memory, such that dma-buf importers can
+ * access it. Used by the drm_gem_map_attach() helper.
*
* This callback is optional.
*/
@@ -156,7 +160,8 @@ struct drm_gem_object_funcs {
* @vmap:
*
* Returns a virtual address for the buffer. Used by the
- * drm_gem_dmabuf_vmap() helper.
+ * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -166,7 +171,8 @@ struct drm_gem_object_funcs {
* @vunmap:
*
* Releases the address previously returned by @vmap. Used by the
- * drm_gem_dmabuf_vunmap() helper.
+ * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -189,7 +195,8 @@ struct drm_gem_object_funcs {
* @evict:
*
* Evicts gem object out from memory. Used by the drm_gem_object_evict()
- * helper. Returns 0 on success, -errno otherwise.
+ * helper. Returns 0 on success, -errno otherwise. Called with a held
+ * GEM reservation lock.
*
* This callback is optional.
*/
@@ -209,6 +216,15 @@ struct drm_gem_object_funcs {
enum drm_gem_object_status (*status)(struct drm_gem_object *obj);
/**
+ * @rss:
+ *
+ * Return resident size of the object in physical memory.
+ *
+ * Called by drm_show_memory_stats().
+ */
+ size_t (*rss)(struct drm_gem_object *obj);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
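Both @status and the new @rss hook are consumed by drm_show_memory_stats(); a driver implementing them typically just forwards its fdinfo callback, as in this sketch (my_driver_show_fdinfo is a placeholder name):

	static void my_driver_show_fdinfo(struct drm_printer *p, struct drm_file *file)
	{
		/* emits the drm-resident-*, drm-purgeable-*, drm-active-* lines,
		 * using @status for the bucket and, when implemented, @rss
		 * instead of obj->size for the accounting */
		drm_show_memory_stats(p, file);
	}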
@@ -382,19 +398,34 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
- * @gpuva:
+ * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object.
*
- * Provides the list of GPU VAs attached to this GEM object.
+ * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the
+ * &drm_gem_object.gpuva.lock mutex. Otherwise, the list is protected by
+ * the GEMs &dma_resv lock.
*
- * Drivers should lock list accesses with the GEMs &dma_resv lock
- * (&drm_gem_object.resv) or a custom lock if one is provided.
+ * Note that all entries in this list must agree on whether
+ * DRM_GPUVM_IMMEDIATE_MODE is set.
*/
struct {
+ /**
+ * @gpuva.list: list of GPUVM mappings attached to this GEM object.
+ *
+ * Drivers should lock list accesses with either the GEMs
+ * &dma_resv lock (&drm_gem_object.resv) or the
+ * &drm_gem_object.gpuva.lock mutex.
+ */
struct list_head list;
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map *lock_dep_map;
-#endif
+ /**
+ * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list
+ * when DRM_GPUVM_IMMEDIATE_MODE is used.
+ *
+ * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be
+ * safe to take this mutex during the fence signalling path, so
+ * do not allocate memory while holding this lock. Otherwise,
+ * the &dma_resv lock should be used.
+ */
+ struct mutex lock;
} gpuva;
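A minimal locking sketch for the rules above; vm_bo is a &drm_gpuvm_bo and the iterators are defined at the end of this header:

	struct drm_gpuvm_bo *vm_bo;

	/* DRM_GPUVM_IMMEDIATE_MODE: the per-object mutex protects the list */
	mutex_lock(&obj->gpuva.lock);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		/* may run in the fence signalling path: no memory allocation */
	}
	mutex_unlock(&obj->gpuva.lock);

	/* otherwise: the same walk is done under the GEM's dma-resv lock,
	 * here via the drm_gem_lock()/drm_gem_unlock() helpers below */
	drm_gem_lock(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		/* inspect vm_bo */
	}
	drm_gem_unlock(obj);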
/**
@@ -438,7 +469,8 @@ struct drm_gem_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
- .mmap = drm_gem_mmap
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
/**
* DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
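Existing users need no change for the new flag; the macro is consumed as before (my_driver_fops and my_driver are placeholder names):

	DEFINE_DRM_GEM_FOPS(my_driver_fops);

	static const struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM,
		.fops		 = &my_driver_fops,
	};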
@@ -463,6 +495,9 @@ void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+int drm_gem_object_init_with_mnt(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size,
+ struct vfsmount *gemfs);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
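drm_gem_object_init_with_mnt() works like drm_gem_object_init() but takes the shmem backing from a driver-provided tmpfs mount (e.g. one mounted with huge-page options); a NULL gemfs is assumed here to fall back to the default shmem path. A hedged usage sketch, with my_gemfs set up elsewhere by the driver:

	ret = drm_gem_object_init_with_mnt(dev, &bo->base, size, my_gemfs);
	if (ret)
		return ret;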
@@ -518,8 +553,11 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_lock(struct drm_gem_object *obj);
+void drm_gem_unlock(struct drm_gem_object *obj);
+
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
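The _unlocked wrappers are gone: callers now hold the reservation lock themselves around drm_gem_vmap()/drm_gem_vunmap(). A minimal CPU-access sketch under the new API (data and len are assumed caller state):

	struct iosys_map map;
	int ret;

	drm_gem_lock(obj);			/* takes obj->resv */
	ret = drm_gem_vmap(obj, &map);
	if (!ret) {
		iosys_map_memcpy_to(&map, 0, data, len);
		drm_gem_vunmap(obj, &map);
	}
	drm_gem_unlock(obj);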
@@ -537,41 +575,54 @@ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
- unsigned int nr_to_scan,
- unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj));
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket);
-int drm_gem_evict(struct drm_gem_object *obj);
+int drm_gem_evict_locked(struct drm_gem_object *obj);
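The scan callback now receives the ww_acquire_ctx used to lock the objects, and eviction from the callback uses the _locked helper since drm_gem_lru_scan() calls back with the reservation held. A shrinker sketch under those assumptions (my_* names are hypothetical; passing a NULL ticket is assumed to keep the trylock behaviour):

	static bool my_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
	{
		/* obj->resv is already locked by drm_gem_lru_scan() */
		return drm_gem_evict_locked(obj) == 0;
	}

	static unsigned long my_scan(struct my_drm *drv, unsigned int nr_to_scan)
	{
		unsigned long remaining = 0;

		return drm_gem_lru_scan(&drv->lru, nr_to_scan, &remaining,
					my_shrink, NULL);
	}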
-#ifdef CONFIG_LOCKDEP
/**
- * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
- * @obj: the &drm_gem_object
- * @lock: the lock used to protect the gpuva list. The locking primitive
- * must contain a dep_map field.
+ * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
*
- * Call this if you're not proctecting access to the gpuva list with the
- * dma-resv lock, but with a custom lock.
+ * This helper should only be used for fdinfo shared memory stats to determine
+ * if a GEM object is shared.
+ *
+ * @obj: obj in question
*/
-#define drm_gem_gpuva_set_lock(obj, lock) \
- if (!WARN((obj)->gpuva.lock_dep_map, \
- "GEM GPUVA lock should be set only once.")) \
- (obj)->gpuva.lock_dep_map = &(lock)->dep_map
-#define drm_gem_gpuva_assert_lock_held(obj) \
- lockdep_assert((obj)->gpuva.lock_dep_map ? \
- lock_is_held((obj)->gpuva.lock_dep_map) : \
+static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
+{
+ return (obj->handle_count > 1) || obj->dma_buf;
+}
+
+/**
+ * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
+ * @obj: the GEM object
+ *
+ * Returns:
+ * True if the GEM object's buffer has been imported, false otherwise
+ */
+static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
+{
+ return !!obj->import_attach;
+}
+
+#ifdef CONFIG_LOCKDEP
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \
+ lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \
+ lockdep_is_held(&(obj)->gpuva.lock) : \
dma_resv_held((obj)->resv))
#else
-#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
-#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0)
#endif
/**
* drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
* @obj: the &drm_gem_object
*
- * This initializes the &drm_gem_object's &drm_gpuva list.
+ * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
*
* Calling this function is only necessary for drivers intending to support the
* &drm_driver_feature DRIVER_GEM_GPUVA.
@@ -584,28 +635,28 @@ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
}
/**
- * drm_gem_for_each_gpuva() - iternator to walk over a list of gpuvas
- * @entry__: &drm_gpuva structure to assign to in each iteration step
- * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo entries to walk are associated with
*
- * This iterator walks over all &drm_gpuva structures associated with the
- * &drm_gpuva_manager.
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object.
*/
-#define drm_gem_for_each_gpuva(entry__, obj__) \
- list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
/**
- * drm_gem_for_each_gpuva_safe() - iternator to safely walk over a list of
- * gpuvas
- * @entry__: &drm_gpuva structure to assign to in each iteration step
- * @next__: &next &drm_gpuva to store the next step
- * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
+ * &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @next__: &drm_gpuvm_bo to store the next step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo entries to walk are associated with
*
- * This iterator walks over all &drm_gpuva structures associated with the
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
* &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
*/
-#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
- list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
+#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)
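Usage sketch for the renamed iterators; the lockdep assert above documents which lock the caller must hold (gpuvm and obj are assumed driver context):

	struct drm_gpuvm_bo *vm_bo, *next;

	drm_gem_gpuva_assert_lock_held(gpuvm, obj);

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		/* inspect vm_bo; do not unlink here */
	}

	drm_gem_for_each_gpuvm_bo_safe(vm_bo, next, obj) {
		/* the current vm_bo may be unlinked from the list here */
	}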
#endif /* __DRM_GEM_H__ */