| author | Matthew Auld <matthew.auld@intel.com> | 2023-10-06 09:46:16 +0100 |
|---|---|---|
| committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2023-12-21 11:42:58 -0500 |
| commit | e814389ff180514001df424f48645cf30f4a2a1e (patch) | |
| tree | 21569930d4a99bfc796ecc484b7754ca18fd3159 /drivers/gpu/drm/xe/xe_migrate.c | |
| parent | 406be3cc186eec67367b87a2af91cb598ff8e239 (diff) | |
drm/xe: directly use pat_index for pte_encode
In a future patch userspace will be able to directly set the pat_index
as part of vm_bind. To support this we need to move away from using
xe_cache_level in the low-level routines and instead use the pat_index
directly.
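
For illustration only, a minimal sketch (not code from this patch) of the
conversion pattern described above: the caller resolves the driver-internal
cache level to a pat_index once, via the per-device xe->pat.idx[] table, and
the pte/pde encode hooks then consume that pat_index directly. The wrapper
function example_encode_pte() below is a hypothetical stand-in; the table
lookup and the pte_encode_bo() call mirror what the diff further down does.

```c
/*
 * Hypothetical helper, for illustration only: resolve XE_CACHE_WB to the
 * device's pat_index once, then hand only the pat_index to the low-level
 * encode hook, mirroring the pattern used throughout the diff below.
 */
static u64 example_encode_pte(struct xe_tile *tile, struct xe_vm *vm,
			      struct xe_bo *bo, u64 bo_offset)
{
	/* Per-device lookup table maps the cache level to a pat_index. */
	u16 pat_index = tile_to_xe(tile)->pat.idx[XE_CACHE_WB];

	/* The encode hook now takes a pat_index instead of an xe_cache_level. */
	return vm->pt_ops->pte_encode_bo(bo, bo_offset, pat_index, 0);
}
```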
v2: Rebase
v3: Some missed conversions, also prefer tile_to_xe() (Niranjana)
v4: remove leftover const (Lucas)
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Pallavi Mishra <pallavi.mishra@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Pallavi Mishra <pallavi.mishra@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_migrate.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_migrate.c | 19 |
1 file changed, 11 insertions, 8 deletions
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 4dc52ac26d52..134b078b6fee 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -163,6 +163,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 				 struct xe_vm *vm)
 {
 	struct xe_device *xe = tile_to_xe(tile);
+	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
 	u8 id = tile->id;
 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
 	u32 map_ofs, level, i;
@@ -194,7 +195,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 		return ret;
 	}
 
-	entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
+	entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
 
 	map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
@@ -202,7 +203,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	/* Map the entire BO in our level 0 pt */
 	for (i = 0, level = 0; i < num_entries; level++) {
 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
-						  XE_CACHE_WB, 0);
+						  pat_index, 0);
 
 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
 
@@ -221,7 +222,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
 	     XE_PAGE_SIZE) {
 		entry = vm->pt_ops->pte_encode_bo(batch, i,
-						  XE_CACHE_WB, 0);
+						  pat_index, 0);
 
 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
 			  entry);
@@ -246,7 +247,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 			flags = XE_PDE_64K;
 
 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
-						  XE_PAGE_SIZE, XE_CACHE_WB);
+						  XE_PAGE_SIZE, pat_index);
 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
 			  entry | flags);
 	}
@@ -254,7 +255,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	/* Write PDE's that point to our BO. */
 	for (i = 0; i < num_entries - num_level; i++) {
 		entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
-						  XE_CACHE_WB);
+						  pat_index);
 
 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
 			  (i + 1) * 8, u64, entry);
@@ -266,7 +267,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 
 		level = 2;
 		ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
-		flags = vm->pt_ops->pte_encode_addr(xe, 0, XE_CACHE_WB, level,
+		flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
 						    true, 0);
 
 		/*
@@ -464,6 +465,7 @@ static void emit_pte(struct xe_migrate *m,
 		     struct xe_res_cursor *cur,
 		     u32 size, struct xe_bo *bo)
 {
+	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
 	u32 ptes;
 	u64 ofs = at_pt * XE_PAGE_SIZE;
 	u64 cur_ofs;
@@ -507,7 +509,7 @@ static void emit_pte(struct xe_migrate *m,
 			}
 
 			addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
-								 addr, XE_CACHE_WB,
+								 addr, pat_index,
 								 0, devmem, flags);
 			bb->cs[bb->len++] = lower_32_bits(addr);
 			bb->cs[bb->len++] = upper_32_bits(addr);
@@ -1226,6 +1228,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 	bool first_munmap_rebind = vma &&
 		vma->gpuva.flags & XE_VMA_FIRST_REBIND;
 	struct xe_exec_queue *q_override = !q ? m->q : q;
+	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
 
 	/* Use the CPU if no in syncs and engine is idle */
 	if (no_in_syncs(syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
@@ -1297,7 +1300,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 
 			xe_tile_assert(tile, pt_bo->size == SZ_4K);
 
-			addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, XE_CACHE_WB, 0);
+			addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
 			bb->cs[bb->len++] = lower_32_bits(addr);
 			bb->cs[bb->len++] = upper_32_bits(addr);
 		}