/* drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h */
#ifndef __NVKM_VMM_H__
#define __NVKM_VMM_H__
#include "priv.h"
#include <core/memory.h>
enum nvkm_memory_target;

struct nvkm_vmm_pt {
	/* Some GPUs have a mapping level with dual page tables to
	 * support large and small pages in the same address-range.
	 *
	 * We track the state of both page tables in one place, which
	 * is why there are multiple PT pointers/refcounts here.
	 */
	struct nvkm_mmu_pt *pt[2];
	u32 refs[2];

	/* Page size handled by this PT.
	 *
	 * The Tesla backend needs to know this when writing PDEs;
	 * otherwise it is unnecessary.
	 */
	u8 page;

	/* Set when the entire page table is sparse.
	 *
	 * Used to propagate sparseness to child page tables.
	 */
	bool sparse:1;

	/* Tracking for page directories.
	 *
	 * The array is indexed by PDE, and will either point to the
	 * child page table, or indicate the PDE is marked as sparse.
	 */
#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
#define NVKM_VMM_PDE_SPARSE       ERR_PTR(-EBUSY)
	struct nvkm_vmm_pt **pde;

	/* Tracking for dual page tables.
	 *
	 * There's one entry for each LPTE, keeping track of whether
	 * there are valid SPTEs in the same address-range.
	 *
	 * This information is used to manage LPTE state transitions.
	 */
#define NVKM_VMM_PTE_SPARSE 0x80
#define NVKM_VMM_PTE_VALID  0x40
#define NVKM_VMM_PTE_SPTES  0x3f
	u8 pte[];
};
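
/* Illustrative sketch (not upstream code): decoding the PDE markers and
 * pte[] tracking bytes above.  Helper names are hypothetical.
 */
static inline bool
nvkm_vmm_pt_pde_sparse(const struct nvkm_vmm_pt *pgd, u32 pdei)
{
	/* ERR_PTR(-EBUSY) marks a sparse PDE; NULL marks an empty one. */
	return NVKM_VMM_PDE_SPARSED(pgd->pde[pdei]);
}

static inline u8
nvkm_vmm_pt_sptes(const struct nvkm_vmm_pt *pgt, u32 lptei)
{
	/* The low six bits count valid SPTEs within this LPTE's range. */
	return pgt->pte[lptei] & NVKM_VMM_PTE_SPTES;
}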

typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
				  struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
				  struct nvkm_vmm_pt *, u32 pdei);
typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
				  u32 ptei, u32 ptes, struct nvkm_vmm_map *);

struct nvkm_vmm_desc_func {
	nvkm_vmm_pxe_func invalid;
	nvkm_vmm_pxe_func unmap;
	nvkm_vmm_pxe_func sparse;

	nvkm_vmm_pde_func pde;

	nvkm_vmm_pte_func mem;
	nvkm_vmm_pte_func dma;
	nvkm_vmm_pte_func sgl;

	nvkm_vmm_pte_func pfn;
	bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
	nvkm_vmm_pxe_func pfn_unmap;
};
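
/* Illustrative sketch (not upstream code): the shape of a backend's
 * descriptor ops.  The gfxxx_* names are hypothetical; real tables such
 * as gf100_vmm_pgt are declared below.
 */
#if 0
static void
gfxxx_vmm_pgt_unmap(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		    u32 ptei, u32 ptes)
{
	/* Invalidate 'ptes' PTEs starting at index 'ptei'. */
}

static const struct nvkm_vmm_desc_func
gfxxx_vmm_pgt = {
	.unmap = gfxxx_vmm_pgt_unmap,
	/* .mem/.dma/.sgl would fill PTEs for each backing type. */
};
#endif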

extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);

void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);

struct nvkm_vmm_desc {
	enum {
		PGD,
		PGT,
		SPT,
		LPT,
	} type;
	u8 bits;	/* VMA bits covered by PT. */
	u8 size;	/* Bytes-per-PTE. */
	u32 align;	/* PT address alignment. */
	const struct nvkm_vmm_desc_func *func;
};
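
/* Illustrative sketch (not upstream code): PT geometry implied by the
 * fields above; a level covering 'bits' VMA bits holds 1 << bits PTEs
 * of 'size' bytes each.  Helper names are hypothetical.
 */
static inline u32
nvkm_vmm_desc_ptes(const struct nvkm_vmm_desc *desc)
{
	return 1U << desc->bits;
}

static inline u32
nvkm_vmm_desc_bytes(const struct nvkm_vmm_desc *desc)
{
	return (1U << desc->bits) * desc->size;
}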

extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];

extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];

struct nvkm_vmm_page {
	u8 shift;
	const struct nvkm_vmm_desc *desc;
#define NVKM_VMM_PAGE_SPARSE                                               0x01
#define NVKM_VMM_PAGE_VRAM                                                 0x02
#define NVKM_VMM_PAGE_HOST                                                 0x04
#define NVKM_VMM_PAGE_COMP                                                 0x08
#define NVKM_VMM_PAGE_Sxxx                                (NVKM_VMM_PAGE_SPARSE)
#define NVKM_VMM_PAGE_xVxx                                  (NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_SVxx             (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_xxHx                                  (NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SxHx             (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVHx             (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SVHx             (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVxC             (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SVxC             (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_xxHC             (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SxHC             (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
	u8 type;
};
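
/* Illustrative sketch (not upstream code): querying the capability mask
 * above, e.g. when choosing a page size for a mapping.  Helper names
 * are hypothetical.
 */
static inline bool
nvkm_vmm_page_sparse(const struct nvkm_vmm_page *page)
{
	return page->type & NVKM_VMM_PAGE_SPARSE;
}

static inline bool
nvkm_vmm_page_comp(const struct nvkm_vmm_page *page)
{
	return page->type & NVKM_VMM_PAGE_COMP;
}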

struct nvkm_vmm_func {
	int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
	void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);

	int (*aper)(enum nvkm_memory_target);
	int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
		     struct nvkm_vmm_map *);
	void (*flush)(struct nvkm_vmm *, int depth);

	int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
		    u32 mthd, void *argv, u32 argc);

	void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);

	u64 page_block;
	const struct nvkm_vmm_page page[];
};
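
/* Illustrative sketch (not upstream code): in-tree users walk page[]
 * until an entry with a zero 'shift', which is assumed here to be the
 * terminator.  The helper name is hypothetical.
 */
static inline const struct nvkm_vmm_page *
nvkm_vmm_func_page(const struct nvkm_vmm_func *func, u8 shift)
{
	const struct nvkm_vmm_page *page;

	for (page = func->page; page->shift; page++) {
		if (page->shift == shift)
			return page;
	}
	return NULL;
}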

struct nvkm_vmm_join {
	struct nvkm_memory *inst;
	struct list_head head;
};

int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, bool managed, u64 addr, u64 size,
		  struct lock_class_key *, const char *name,
		  struct nvkm_vmm **);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
				     u64 addr, u64 size);
int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
			bool sparse, u8 page, u8 align, u64 size,
			struct nvkm_vma **pvma);
void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);

#define NVKM_VMM_PFN_ADDR                                 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT                                              12
#define NVKM_VMM_PFN_APER                                 0x00000000000000f0ULL
#define NVKM_VMM_PFN_HOST                                 0x0000000000000000ULL
#define NVKM_VMM_PFN_VRAM                                 0x0000000000000010ULL
#define NVKM_VMM_PFN_W                                    0x0000000000000002ULL
#define NVKM_VMM_PFN_V                                    0x0000000000000001ULL
#define NVKM_VMM_PFN_NONE                                 0x0000000000000000ULL

int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
int nvkm_vmm_pfn_unmap(struct nvkm_vmm *, u64 addr, u64 size);
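
/* Illustrative sketch (not upstream code): composing one entry of the
 * pfn[] array passed to nvkm_vmm_pfn_map() from the fields above.  The
 * helper name is hypothetical.
 */
static inline u64
nvkm_vmm_pfn_encode(u64 addr, bool vram, bool write)
{
	u64 pfn = (addr & NVKM_VMM_PFN_ADDR) | NVKM_VMM_PFN_V;

	if (vram)
		pfn |= NVKM_VMM_PFN_VRAM;
	if (write)
		pfn |= NVKM_VMM_PFN_W;
	return pfn;
}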

struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);

int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
		  bool, u64, u64, void *, u32, struct lock_class_key *,
		  const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);

int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void nv50_vmm_flush(struct nvkm_vmm *, int);

int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int gf100_vmm_aper(enum nvkm_memory_target);
int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gf100_vmm_flush(struct nvkm_vmm *, int);
void gf100_vmm_invalidate(struct nvkm_vmm *, u32 type);
void gf100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gk20a_vmm_aper(enum nvkm_memory_target);

int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int gp100_vmm_new_(const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv44_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int mcp77_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk104_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk20a_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm200_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gm20b_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm20b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp10b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);

#define VMM_PRINT(l,v,p,f,a...) do {                                           \
	struct nvkm_vmm *_vmm = (v);                                           \
	if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) {               \
		nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n",            \
			     _vmm->name, ##a);                                 \
	}                                                                      \
} while(0)
#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
#define VMM_SPAM(v,f,a...)  VMM_PRINT(NV_DBG_SPAM , (v),  dbg, f, ##a)
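
/* Usage sketch: a message is emitted only when both the build-time
 * CONFIG_NOUVEAU_DEBUG level and the per-VMM debug level reach the
 * message's level, e.g. (illustrative call, not from upstream):
 *
 *	VMM_DEBUG(vmm, "unable to map %016llx", addr);
 */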

#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do {            \
	nvkm_kmap((PT)->memory);                                               \
	while (PTEN) {                                                         \
		u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift;           \
		u64 _addr = ((BASE) + MAP->off);                               \
                                                                               \
		if (_ptes > PTEN) {                                            \
			MAP->off += PTEN << MAP->page->shift;                  \
			_ptes = PTEN;                                          \
		} else {                                                       \
			MAP->off = 0;                                          \
			NEXT;                                                  \
		}                                                              \
                                                                               \
		VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes);      \
                                                                               \
		FILL(VMM, PT, PTEI, _ptes, MAP, _addr);                        \
		PTEI += _ptes;                                                 \
		PTEN -= _ptes;                                                 \
	}                                                                      \
	nvkm_done((PT)->memory);                                               \
} while(0)

#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
		     ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT),             \
		     ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT),             \
		     (MAP->mem = MAP->mem->next))
#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
		     *MAP->dma, PAGE_SIZE, MAP->dma++)
#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
		     sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl),           \
		     (MAP->sgl = sg_next(MAP->sgl)))
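
/* Illustrative sketch (not upstream code): a backend's .sgl op is
 * typically a thin wrapper that hands its PTE-filling callback to
 * VMM_MAP_ITER_SGL.  The gfxxx_* names are hypothetical stand-ins.
 */
#if 0
static inline void
gfxxx_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	/* Write 'ptes' PTEs starting at 'ptei', mapping 'addr' onwards. */
}

static void
gfxxx_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gfxxx_vmm_pgt_pte);
}
#endif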

#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do {                                     \
	const u32 _pteo = (o); u##b _data = (d);                               \
	VMM_SPAM((v), "   %010llx "f, (m)->addr + _pteo, _data, ##a);          \
	VMM_##fn((m), (m)->base + _pteo, _data, (c), b);                       \
} while(0)

#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d),  1, 32, WO, "%08x")
#define VMM_FO032(m,v,o,d,c)                                                   \
	VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))

#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d),  1, 64, WO, "%016llx")
#define VMM_FO064(m,v,o,d,c)                                                   \
	VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
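
/* Illustrative sketch (not upstream code): a possible body for the FILL
 * callback stubbed in the VMM_MAP_ITER_SGL sketch earlier, written with
 * VMM_WO064 so each PTE write is traced at the SPAM level.  The PTE
 * layout here is hypothetical.
 */
#if 0
static inline void
gfxxx_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 8) | 1; /* hypothetical valid bit + address */

	while (ptes--) {
		VMM_WO064(pt, vmm, ptei++ * 8, data);
		data += 1ULL << (map->page->shift - 8);
	}
}
#endif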

#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do {                                   \
	u32 _pteo = (o), _ptes = (c);                                          \
	const u64 _addr = (m)->addr + _pteo;                                   \
	VMM_SPAM((v), "   %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a);   \
	while (_ptes--) {                                                      \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo));           \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi));           \
		_pteo += 0x10;                                                 \
	}                                                                      \
} while(0)

#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
#define VMM_FO128(m,v,o,lo,hi,c) do {                                          \
	nvkm_kmap((m)->memory);                                                \
	VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c));                    \
	nvkm_done((m)->memory);                                                \
} while(0)
#endif