/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles for which a binding is present
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles for which the binding has
	 * been invalidated for this range. Protected by GPU SVM notifier
	 * lock.
	 */
	u8 tile_invalidated;
};
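
/*
 * Illustrative sketch (not part of the driver): @tile_present and
 * @tile_invalidated are per-tile bitmasks, so, assuming they are indexed
 * by tile id and the GPU SVM notifier lock is held, a caller could check
 * whether tile 0 still has a valid binding with something like:
 *
 *	if ((range->tile_present & BIT(0)) &&
 *	    !(range->tile_invalidated & BIT(0)))
 *		handle_mapped_range();
 *
 * handle_mapped_range() above is a hypothetical placeholder.
 */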

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma,
						 struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
					struct xe_vma *vma,
					bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm, struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}
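
/*
 * Illustrative sketch (not part of the driver): given a struct
 * drm_gpusvm_range *r handed back by the GPU SVM core, the embedding
 * xe_svm_range is recovered with:
 *
 *	struct xe_svm_range *range = to_xe_range(r);
 *
 * The variable names above are hypothetical.
 */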

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
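
/*
 * Illustrative sketch (not part of the driver): helpers documented as
 * requiring the GPU SVM notifier lock, such as
 * xe_svm_range_has_dma_mapping(), are expected to be wrapped like:
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range))
 *		do_something();
 *	xe_svm_notifier_unlock(vm);
 *
 * do_something() above is a hypothetical placeholder.
 */
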
void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		  const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma,
						 struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
					struct xe_vma *vma,
					u32 region)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm, struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#endif
#endif