path: root/drivers/gpu/drm/xe/xe_userptr.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "xe_userptr.h"

#include <linux/mm.h>

#include "xe_trace_bo.h"

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->svm.gpusvm.notifier_lock held. There is no guarantee
 * that the vma userptr will remain valid after a lockless check, so
 * typically the call needs to be followed by a proper check under the
 * notifier_lock.
 *
 * Return: 0 if the userptr vma is valid, -EAGAIN otherwise; a repin is
 * then recommended.
 */
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
	return mmu_interval_check_retry(&uvma->userptr.notifier,
					uvma->userptr.pages.notifier_seq) ?
		-EAGAIN : 0;
}
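
/*
 * Example usage (illustrative sketch only, not part of the driver;
 * uvma, vm and stale are assumed locals): the lockless check suits a
 * fast path, but a stale result must be confirmed under the notifier
 * lock before acting on it:
 *
 *	if (xe_vma_userptr_check_repin(uvma) == -EAGAIN) {
 *		down_read(&vm->svm.gpusvm.notifier_lock);
 *		stale = mmu_interval_read_retry(&uvma->userptr.notifier,
 *						uvma->userptr.pages.notifier_seq);
 *		up_read(&vm->svm.gpusvm.notifier_lock);
 *	}
 */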

/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM has userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function checks for whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the svm.gpusvm.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
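
/*
 * Example usage (illustrative sketch only, loosely modelled on the
 * driver's rebind worker; err is an assumed local): with vm->lock held
 * for write, repin in a loop until the locked check confirms nothing
 * was invalidated while repinning:
 *
 *	do {
 *		err = xe_vm_userptr_pin(vm);
 *		if (err)
 *			break;
 *		down_read(&vm->svm.gpusvm.notifier_lock);
 *		err = __xe_vm_userptr_needs_repin(vm);
 *		up_read(&vm->svm.gpusvm.notifier_lock);
 *	} while (err == -EAGAIN);
 */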

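/**
 * xe_vma_userptr_pin_pages() - Pin pages of a userptr vma
 * @uvma: The userptr vma
 *
 * (Re-)grab the pages currently backing the userptr range through
 * drm_gpusvm, recording the notifier sequence number used for later
 * validity checks. Called with vm->lock held; a destroyed vma is a
 * no-op.
 *
 * Return: 0 on success, negative error code on error.
 */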
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	struct drm_gpusvm_ctx ctx = {
		.read_only = xe_vma_read_only(vma),
	};

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));

	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
				    uvma->userptr.notifier.mm,
				    &uvma->userptr.notifier,
				    xe_vma_userptr(vma),
				    xe_vma_userptr(vma) + xe_vma_size(vma),
				    &ctx);
}

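/*
 * Shoot down GPU use of an invalidated userptr vma: flag it for repin
 * (or invalidate the GPU mappings directly in fault mode), wait for
 * outstanding work on the VM and unmap its pages. Called with the
 * vm->svm.gpusvm.notifier_lock held.
 */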
static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct drm_gpusvm_ctx ctx = {
		.in_notifier = true,
		.read_only = xe_vma_read_only(vma),
	};
	long err;

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&userptr->invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	drm_gpusvm_unmap_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
			       xe_vma_size(vma) >> PAGE_SHIFT, &ctx);
}

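/*
 * MMU interval notifier callback, invoked when the CPU mappings
 * backing a userptr vma change. Returns false if the range is not
 * blockable; otherwise bumps the notifier sequence and invalidates
 * the vma under the notifier lock in write mode.
 */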
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	vm_dbg(&vm->xe->drm,
	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
	       xe_vma_start(vma), xe_vma_size(vma));

	down_write(&vm->svm.gpusvm.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	__vma_userptr_invalidate(vm, uvma);
	up_write(&vm->svm.gpusvm.notifier_lock);
	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
/**
 * xe_vma_userptr_force_invalidate() - force invalidate a userptr
 * @uvma: The userptr vma to invalidate
 *
 * Perform a forced userptr invalidation for testing purposes.
 */
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	/* Protect against concurrent userptr pinning */
	lockdep_assert_held(&vm->lock);
	/* Protect against concurrent notifiers */
	lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
	/*
	 * Protect against concurrent instances of this function and
	 * the critical exec sections
	 */
	xe_vm_assert_held(vm);

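	/*
	 * If the stored seq still matches the notifier's, step it back
	 * by two (preserving the sequence parity) so that later
	 * mmu_interval_read_retry() calls report an invalidation.
	 */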
	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     uvma->userptr.pages.notifier_seq))
		uvma->userptr.pages.notifier_seq -= 2;
	__vma_userptr_invalidate(vm, uvma);
}
#endif

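/**
 * xe_vm_userptr_pin() - Repin all invalidated userptrs in a VM
 * @vm: The VM.
 *
 * Collect all userptr vmas invalidated since the last successful repin,
 * re-grab their pages and move them to the VM's rebind list. A vma that
 * faults with -EFAULT is instead invalidated on the GPU and dropped
 * from the rebind list; on any other error the collected vmas are moved
 * back to the invalidated list for a later retry. Called with vm->lock
 * held for write, in non-fault mode only.
 *
 * Return: 0 on success, negative error code on error.
 */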
int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_userptr_vma *uvma, *next;
	int err = 0;

	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&uvma->userptr.invalidate_link);
		list_add_tail(&uvma->userptr.repin_link,
			      &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to bind list */
	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		err = xe_vma_userptr_pin_pages(uvma);
		if (err == -EFAULT) {
			list_del_init(&uvma->userptr.repin_link);
			/*
			 * We might have already done the pin once, but then
			 * had to retry before the re-bind happened due to
			 * some other condition in the caller. In the meantime
			 * the userptr got dinged by the notifier such that we
			 * need to revalidate here, but this time we hit the
			 * EFAULT. In such a case make sure we remove ourselves
			 * from the rebind list to avoid going down in flames.
			 */
			if (!list_empty(&uvma->vma.combined_links.rebind))
				list_del_init(&uvma->vma.combined_links.rebind);

			/* Wait for pending binds */
			xe_vm_lock(vm, false);
			dma_resv_wait_timeout(xe_vm_resv(vm),
					      DMA_RESV_USAGE_BOOKKEEP,
					      false, MAX_SCHEDULE_TIMEOUT);

			down_read(&vm->svm.gpusvm.notifier_lock);
			err = xe_vm_invalidate_vma(&uvma->vma);
			up_read(&vm->svm.gpusvm.notifier_lock);
			xe_vm_unlock(vm);
			if (err)
				break;
		} else {
			if (err)
				break;

			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->vma.combined_links.rebind,
				       &vm->rebind_list);
		}
	}

	if (err) {
		down_write(&vm->svm.gpusvm.notifier_lock);
		spin_lock(&vm->userptr.invalidated_lock);
		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
					 userptr.repin_link) {
			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->userptr.invalidate_link,
				       &vm->userptr.invalidated);
		}
		spin_unlock(&vm->userptr.invalidated_lock);
		up_write(&vm->svm.gpusvm.notifier_lock);
	}
	return err;
}

/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

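/**
 * xe_userptr_setup() - Initialize userptr state of a vma
 * @uvma: The userptr vma
 * @start: Start of the userptr CPU address range
 * @range: Size of the range in bytes
 *
 * Initialize the invalidation and repin list links and register an
 * mmu interval notifier for the range on the current process mm.
 *
 * Return: 0 on success, negative error code on error.
 */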
int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
		     unsigned long range)
{
	struct xe_userptr *userptr = &uvma->userptr;
	int err;

	INIT_LIST_HEAD(&userptr->invalidate_link);
	INIT_LIST_HEAD(&userptr->repin_link);

	err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
					   start, range,
					   &vma_userptr_notifier_ops);
	if (err)
		return err;

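	/*
	 * No pages have been grabbed yet; start from a seq the notifier
	 * will not have handed out, so the userptr reads as needing a
	 * repin until the first successful pin.
	 */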
	userptr->pages.notifier_seq = LONG_MAX;

	return 0;
}

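/**
 * xe_userptr_remove() - Tear down the page tracking of a userptr vma
 * @uvma: The userptr vma
 *
 * Free the drm_gpusvm page state and remove the mmu interval notifier,
 * which waits for any notifier callbacks in flight to finish.
 */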
void xe_userptr_remove(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
	struct xe_userptr *userptr = &uvma->userptr;

	drm_gpusvm_free_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
			      xe_vma_size(&uvma->vma) >> PAGE_SHIFT);

	/*
	 * Since userptr pages are not pinned, we can't remove
	 * the notifier until we're sure the GPU is not accessing
	 * them anymore.
	 */
	mmu_interval_notifier_remove(&userptr->notifier);
}

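/**
 * xe_userptr_destroy() - Unlink a userptr vma from VM bookkeeping
 * @uvma: The userptr vma
 *
 * Remove the vma from the VM's invalidated list. The vma must already
 * be off the repin list.
 */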
void xe_userptr_destroy(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
	list_del(&uvma->userptr.invalidate_link);
	spin_unlock(&vm->userptr.invalidated_lock);
}