Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.h')
 drivers/gpu/drm/xe/xe_vm.h | 116 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 109 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 6df1f1c7f85d..494af6bdc646 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -6,6 +6,7 @@
#ifndef _XE_VM_H_
#define _XE_VM_H_
+#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
@@ -16,11 +17,13 @@ struct drm_printer;
struct drm_file;
struct ttm_buffer_object;
-struct ttm_validate_buffer;
+
+struct dma_fence;
struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
+struct xe_svm_range;
struct drm_exec;
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
@@ -150,6 +153,11 @@ static inline bool xe_vma_is_null(struct xe_vma *vma)
return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}
+static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
+{
+ return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
+}
+
static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
return !xe_vma_bo(vma);
@@ -157,7 +165,8 @@ static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
- return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
+ return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
+ !xe_vma_is_cpu_addr_mirror(vma);
}
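With xe_vma_is_cpu_addr_mirror() added, the helpers above classify a VMA into mutually exclusive kinds: cpu-address-mirror, NULL/sparse, userptr, or bo-backed. A minimal dispatch sketch, illustrative only and not part of this patch (example_handle_vma() is a made-up name):

static void example_handle_vma(struct xe_vma *vma)
{
	if (xe_vma_is_cpu_addr_mirror(vma)) {
		/* CPU address mirror: presumably faulted in via the SVM path */
	} else if (xe_vma_is_null(vma)) {
		/* NULL/sparse binding: no backing store */
	} else if (xe_vma_is_userptr(vma)) {
		/* No bo, not sparse, not a mirror: userptr-backed */
	} else {
		/* Regular bo-backed mapping */
	}
}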
/**
@@ -207,10 +216,20 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
int xe_vm_userptr_check_repin(struct xe_vm *vm);
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
+ u8 tile_mask);
+struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask);
+struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
+ struct xe_svm_range *range);
int xe_vm_invalidate_vma(struct xe_vma *vma);
+int xe_vm_validate_protected(struct xe_vm *vm);
+
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
@@ -240,10 +259,14 @@ int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
-int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared);
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
+
+struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
+ struct xe_exec_queue *q, u64 addr,
+ enum xe_cache_level cache_lvl);
/**
 * xe_vm_resv() - Returns the vm's reservation object
@@ -256,6 +279,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
return drm_gpuvm_resv(&vm->gpuvm);
}
+void xe_vm_kill(struct xe_vm *vm, bool unlocked);
+
/**
* xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
* @vm: The vm
@@ -270,9 +295,86 @@ static inline void vm_dbg(const struct drm_device *dev,
const char *format, ...)
{ /* noop */ }
#endif
-#endif
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+/**
+ * xe_vm_set_validating() - Register this task as currently making bos resident
+ * @vm: Pointer to the vm or NULL.
+ * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
+ * validating.
+ *
+ * Register this task as currently making bos resident for the vm. Intended
+ * to avoid eviction by the same task of shared bos bound to the vm.
+ * Call with the vm's resv lock held.
+ *
+ * Return: A pin cookie that should be used for xe_vm_clear_validating().
+ */
+static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
+ bool allow_res_evict)
+{
+ struct pin_cookie cookie = {};
+
+ if (vm && !allow_res_evict) {
+ xe_vm_assert_held(vm);
+ cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, current);
+ }
+
+ return cookie;
+}
+
+/**
+ * xe_vm_clear_validating() - Unregister this task as currently making bos resident
+ * @vm: Pointer to the vm or NULL
+ * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
+ * value as for xe_vm_set_validating().
+ * @cookie: Cookie obtained from xe_vm_set_validating().
+ *
+ * Unregister this task as currently making bos resident for the vm,
+ * pairing with an earlier call to xe_vm_set_validating().
+ * Call with the vm's resv lock held.
+ */
+static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
+ struct pin_cookie cookie)
+{
+ if (vm && !allow_res_evict) {
+ lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, NULL);
+ }
+}
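A minimal sketch of how xe_vm_set_validating() and xe_vm_clear_validating() are meant to pair around a validation step, assuming the caller already holds the vm's resv lock; example_validate_locked() and the elided validation step are placeholders, not part of this patch:

static int example_validate_locked(struct xe_vm *vm, bool allow_res_evict)
{
	struct pin_cookie cookie;
	int err = 0;

	xe_vm_assert_held(vm);	/* the vm's resv lock must be held */

	cookie = xe_vm_set_validating(vm, allow_res_evict);
	/* ... make the vm's bos resident here, setting err on failure ... */
	xe_vm_clear_validating(vm, allow_res_evict, cookie);

	return err;
}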
+
+/**
+ * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
+ * by the current task.
+ * @vm: Pointer to the vm.
+ *
+ * If this function returns %true, we should be in a vm resv locked region, since
+ * the current process is the same task that called xe_vm_set_validating().
+ * The function asserts that this is indeed the case.
+ *
+ * Return: %true if the task is currently making bos resident, %false otherwise.
+ */
+static inline bool xe_vm_is_validating(struct xe_vm *vm)
+{
+	/* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
+ if (READ_ONCE(vm->validating) == current) {
+ xe_vm_assert_held(vm);
+ return true;
+ }
+ return false;
+}
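A sketch of the consumer side, where an eviction path skips bos bound to a vm that the current task is itself validating; example_bo_evictable() is an illustrative name, not an existing driver function:

static bool example_bo_evictable(struct xe_vm *vm)
{
	/*
	 * The task making this vm's bos resident must not evict
	 * shared bos bound to the same vm from under itself.
	 */
	return !xe_vm_is_validating(vm);
}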
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif