Diffstat (limited to 'include/drm/drm_gpuvm.h')
-rw-r--r--  include/drm/drm_gpuvm.h | 103 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------
1 file changed, 80 insertions(+), 23 deletions(-)
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 00d4e43b76b6..fdfc575b2603 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -27,6 +27,7 @@
 
 #include <linux/dma-resv.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/rbtree.h>
 #include <linux/types.h>
 
@@ -103,7 +104,7 @@ struct drm_gpuva {
 	} va;
 
 	/**
-	 * @gem: structure containing the &drm_gem_object and it's offset
+	 * @gem: structure containing the &drm_gem_object and its offset
 	 */
 	struct {
 		/**
@@ -152,6 +153,7 @@ void drm_gpuva_remove(struct drm_gpuva *va);
 
 void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
 void drm_gpuva_unlink(struct drm_gpuva *va);
+void drm_gpuva_unlink_defer(struct drm_gpuva *va);
 
 struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
 				 u64 addr, u64 range);
@@ -160,15 +162,6 @@ struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
 struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
 struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
 
-static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
-				  struct drm_gem_object *obj, u64 offset)
-{
-	va->va.addr = addr;
-	va->va.range = range;
-	va->gem.obj = obj;
-	va->gem.offset = offset;
-}
-
 /**
  * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
  * invalidated
@@ -206,9 +199,19 @@ enum drm_gpuvm_flags {
 	DRM_GPUVM_RESV_PROTECTED = BIT(0),
 
 	/**
+	 * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed
+	 * for modifying the GPUVM during the fence signalling path
+	 *
+	 * When set, gpuva.lock is used to protect gpuva.list in all GEM
+	 * objects associated with this GPUVM. Otherwise, the GEMs dma-resv is
+	 * used.
+	 */
+	DRM_GPUVM_IMMEDIATE_MODE = BIT(1),
+
+	/**
 	 * @DRM_GPUVM_USERBITS: user defined bits
 	 */
-	DRM_GPUVM_USERBITS = BIT(1),
+	DRM_GPUVM_USERBITS = BIT(2),
 };
 
 /**
@@ -330,6 +333,11 @@ struct drm_gpuvm {
 		 */
 		spinlock_t lock;
 	} evict;
+
+	/**
+	 * @bo_defer: structure holding vm_bos that need to be destroyed
+	 */
+	struct llist_head bo_defer;
 };
 
 void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
@@ -379,6 +387,19 @@ drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
 }
 
 /**
+ * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm)
+{
+	return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE;
+}
+
+/**
  * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
  * @gpuvm__: the &drm_gpuvm
  *
@@ -700,6 +721,12 @@ struct drm_gpuvm_bo {
 			 * &drm_gpuvms evict list.
 			 */
 			struct list_head evict;
+
+			/**
+			 * @list.entry.bo_defer: List entry to attach to
+			 * the &drm_gpuvms bo_defer list.
+			 */
+			struct llist_node bo_defer;
 		} entry;
 	} list;
 };
@@ -732,6 +759,9 @@ drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
 
 bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
 
+bool drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo);
+void drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm);
+
 struct drm_gpuvm_bo *
 drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
 		  struct drm_gem_object *obj);
@@ -751,9 +781,10 @@ drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
 {
 	struct drm_gpuvm_bo *vm_bo;
 
-	drm_gem_gpuva_assert_lock_held(obj);
-	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+		drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj);
 		drm_gpuvm_bo_evict(vm_bo, evict);
+	}
 }
 
 void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
@@ -812,6 +843,11 @@ enum drm_gpuva_op_type {
 	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
 	DRM_GPUVA_OP_PREFETCH,
+
+	/**
+	 * @DRM_GPUVA_OP_DRIVER: the driver defined op type
+	 */
+	DRM_GPUVA_OP_DRIVER,
 };
 
 /**
@@ -838,7 +874,7 @@ struct drm_gpuva_op_map {
 	} va;
 
 	/**
-	 * @gem: structure containing the &drm_gem_object and it's offset
+	 * @gem: structure containing the &drm_gem_object and its offset
 	 */
 	struct {
 		/**
@@ -1053,10 +1089,23 @@ struct drm_gpuva_ops {
  */
 #define drm_gpuva_next_op(op) list_next_entry(op, entry)
 
+/**
+ * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
+ */
+struct drm_gpuvm_map_req {
+	/**
+	 * @map: struct drm_gpuva_op_map
+	 */
+	struct drm_gpuva_op_map map;
+};
+
 struct drm_gpuva_ops *
 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
-			    u64 addr, u64 range,
-			    struct drm_gem_object *obj, u64 offset);
+			    const struct drm_gpuvm_map_req *req);
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+			     const struct drm_gpuvm_map_req *req);
+
 struct drm_gpuva_ops *
 drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
 			      u64 addr, u64 range);
@@ -1074,8 +1123,10 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
 static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
 					  struct drm_gpuva_op_map *op)
 {
-	drm_gpuva_init(va, op->va.addr, op->va.range,
-		       op->gem.obj, op->gem.offset);
+	va->va.addr = op->va.addr;
+	va->va.range = op->va.range;
+	va->gem.obj = op->gem.obj;
+	va->gem.offset = op->gem.offset;
 }
 
 /**
@@ -1184,11 +1235,11 @@ struct drm_gpuvm_ops {
 
 	/**
 	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
-	 * &drm_gpuvm_sm_unmap to unmap an existent mapping
+	 * &drm_gpuvm_sm_unmap to unmap an existing mapping
 	 *
-	 * This callback is called when existent mapping needs to be unmapped.
+	 * This callback is called when existing mapping needs to be unmapped.
 	 * This is the case when either a newly requested mapping encloses an
-	 * existent mapping or an unmap of an existent mapping is requested.
+	 * existing mapping or an unmap of an existing mapping is requested.
 	 *
 	 * The &priv pointer matches the one the driver passed to
 	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
@@ -1200,12 +1251,18 @@ struct drm_gpuvm_ops {
 };
 
 int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
-		     u64 addr, u64 range,
-		     struct drm_gem_object *obj, u64 offset);
+		     const struct drm_gpuvm_map_req *req);
 
 int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
 		       u64 addr, u64 range);
 
+int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+			       struct drm_exec *exec, unsigned int num_fences,
+			       struct drm_gpuvm_map_req *req);
+
+int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+				 u64 req_addr, u64 req_range);
+
 void drm_gpuva_map(struct drm_gpuvm *gpuvm,
 		   struct drm_gpuva *va,
 		   struct drm_gpuva_op_map *op);
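
The map request refactor replaces the old four-argument form of drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_map() with a single struct drm_gpuvm_map_req, which wraps a struct drm_gpuva_op_map. A minimal sketch of a driver bind path using the new interface, assuming the ops-based flow: example_vm_bind() and its error handling are hypothetical, only the drm_gpuvm_*/drm_gpuva_* calls come from this header.

/* Hypothetical driver bind path: build a map request and let GPUVM
 * compute the split/merge steps. Locking and GPU programming omitted. */
static int example_vm_bind(struct drm_gpuvm *gpuvm,
			   struct drm_gem_object *obj,
			   u64 addr, u64 range, u64 offset)
{
	struct drm_gpuvm_map_req req = {
		.map.va.addr = addr,
		.map.va.range = range,
		.map.gem.obj = obj,
		.map.gem.offset = offset,
	};
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		/* program page tables for op->map / op->remap / op->unmap */
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}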

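The deferred-cleanup additions are meant to fit together roughly as follows, assuming a GPUVM created with DRM_GPUVM_IMMEDIATE_MODE: drm_gpuva_unlink_defer() drops vm_bos onto the new bo_defer llist instead of destroying them in place, and drm_gpuvm_bo_deferred_cleanup() reaps that list from process context. The sketch below is an assumption about intended driver usage, not part of this patch; struct example_vm, the example_* helpers, and the choice of a worker are made up.

/* Hypothetical driver VM wrapping a GPUVM plus a cleanup worker. */
struct example_vm {
	struct drm_gpuvm gpuvm;
	struct work_struct cleanup_work;
};

/* Hypothetical unmap step that may run where the GEM's dma-resv must not
 * be taken (e.g. the fence signalling path in immediate mode): the unlink
 * is deferred rather than dropping the vm_bo reference in place. */
static void example_va_unmap(struct drm_gpuvm *gpuvm, struct drm_gpuva *va)
{
	WARN_ON(!drm_gpuvm_immediate_mode(gpuvm));

	drm_gpuva_remove(va);
	drm_gpuva_unlink_defer(va);	/* vm_bo lands on gpuvm->bo_defer */
}

/* Hypothetical worker running in process context, where taking locks and
 * freeing memory is allowed again. */
static void example_vm_cleanup_work(struct work_struct *work)
{
	struct example_vm *evm = container_of(work, struct example_vm,
					      cleanup_work);

	drm_gpuvm_bo_deferred_cleanup(&evm->gpuvm);
}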