Diffstat (limited to 'drivers/gpu/drm/xe/xe_migrate.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 640
1 file changed, 367 insertions(+), 273 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 70480c305602..278bc96cf593 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -10,11 +10,13 @@
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
+#include "instructions/xe_gpu_commands.h"
#include "instructions/xe_mi_commands.h"
-#include "regs/xe_gpu_commands.h"
+#include "regs/xe_gtt_defs.h"
#include "tests/xe_test.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -30,9 +32,8 @@
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
-#include "xe_trace.h"
+#include "xe_trace_bo.h"
#include "xe_vm.h"
-#include "xe_wa.h"
/**
* struct xe_migrate - migrate context.
@@ -68,20 +69,30 @@ struct xe_migrate {
#define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
-#define NUM_KERNEL_PDE 17
+#define NUM_KERNEL_PDE 15
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
+#define MAX_NUM_PTE 512
+#define IDENTITY_OFFSET 256ULL
+
+/*
+ * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
+ * legal value accepted. Since that instruction field is always stored in
+ * (val-2) format, this translates to 0x400 dwords for the true maximum length
+ * of the instruction. Subtracting the instruction header (1 dword) and
+ * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
+ */
+#define MAX_PTE_PER_SDI 0x1FE
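
The arithmetic above can be double-checked with a small standalone C sketch (not part of the patch; the SDI_* names are invented for illustration, only the 0x1FE result matches MAX_PTE_PER_SDI):

    #include <assert.h>    /* static_assert in C11 userspace */

    /* MI_STORE_DATA_IMM length is a 10-bit field stored as (value - 2). */
    #define SDI_MAX_LEN_FIELD    0x3FE                              /* largest legal encoding */
    #define SDI_MAX_DWORDS       (SDI_MAX_LEN_FIELD + 2)            /* 0x400 dwords total */
    #define SDI_OVERHEAD_DWORDS  3                                  /* 1 header + 2 address dwords */
    #define SDI_PAYLOAD_DWORDS   (SDI_MAX_DWORDS - SDI_OVERHEAD_DWORDS)  /* 0x3FD */

    /* Each PTE is one qword, i.e. two dwords. */
    static_assert(SDI_PAYLOAD_DWORDS / 2 == 0x1FE,
                  "payload holds MAX_PTE_PER_SDI (0x1FE) PTEs");
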
/**
- * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
* @tile: The tile.
*
- * Returns the default migrate engine of this tile.
- * TODO: Perhaps this function is slightly misplaced, and even unneeded?
+ * Returns the default migrate exec queue of this tile.
*
- * Return: The default migrate engine
+ * Return: The default migrate exec queue
*/
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
{
return tile->migrate->q;
}
@@ -110,14 +121,64 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level)
return (slot + 1ULL) << xe_pt_shift(level + 1);
}
-static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
+static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
{
/*
* Remove the DPA to get a correct offset into identity table for the
* migrate offset
*/
+ u64 identity_offset = IDENTITY_OFFSET;
+
+ if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
+ identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+
addr -= xe->mem.vram.dpa_base;
- return addr + (256ULL << xe_pt_shift(2));
+ return addr + (identity_offset << xe_pt_shift(2));
+}
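
A quick numeric check of the offset math (a standalone userspace sketch; the 16 GiB VRAM size and zero dpa_base are invented example inputs, and PT_SHIFT_L2 stands in for xe_pt_shift(2), which is 30 here):

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_1G           (1ULL << 30)
    #define IDENTITY_OFFSET 256ULL
    #define PT_SHIFT_L2     30    /* one level-2 entry covers 1 GiB */

    int main(void)
    {
        uint64_t dpa_base = 0, vram_size = 16 * SZ_1G;
        uint64_t addr = dpa_base + 0x1000;    /* some VRAM device address */

        /* Plain identity map: 256 GiB + (addr - dpa_base). */
        uint64_t plain = (addr - dpa_base) + (IDENTITY_OFFSET << PT_SHIFT_L2);
        /* Xe2+ compressed window starts right after the plain map. */
        uint64_t comp_base = IDENTITY_OFFSET + (vram_size + SZ_1G - 1) / SZ_1G;
        uint64_t comp = (addr - dpa_base) + (comp_base << PT_SHIFT_L2);

        printf("plain %#llx comp %#llx\n",
               (unsigned long long)plain, (unsigned long long)comp);
        return 0;    /* prints 0x4000001000 and 0x4400001000 */
    }
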
+
+static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
+ u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
+{
+ u64 pos, ofs, flags;
+ u64 entry;
+ /* XXX: Unclear if this should be usable_size? */
+ u64 vram_limit = xe->mem.vram.actual_physical_size +
+ xe->mem.vram.dpa_base;
+ u32 level = 2;
+
+ ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
+ flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
+ true, 0);
+
+ xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+
+ /*
+ * Use 1GB pages when possible; the last chunk always uses 2M
+ * pages, as mixing reserved memory (stolen, WOCPM) with a single
+ * mapping is not allowed on certain platforms.
+ */
+ for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ pos += SZ_1G, ofs += 8) {
+ if (pos + SZ_1G >= vram_limit) {
+ entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
+ pat_index);
+ xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
+
+ flags = vm->pt_ops->pte_encode_addr(xe, 0,
+ pat_index,
+ level - 1,
+ true, 0);
+
+ for (ofs = pt_2m_ofs; pos < vram_limit;
+ pos += SZ_2M, ofs += 8)
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ break; /* Ensure pos == vram_limit assert correct */
+ }
+
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ }
+
+ xe_assert(xe, pos == vram_limit);
}
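
To make the resulting layout concrete, the same mapping worked through with made-up numbers (16 GiB of VRAM starting at dpa_base 0; not values taken from the patch):

    /*
     * Level-2 entries IDENTITY_OFFSET .. IDENTITY_OFFSET + 14:
     *   fifteen direct 1 GiB identity PTEs covering VRAM [0, 15 GiB).
     *
     * Level-2 entry IDENTITY_OFFSET + 15:
     *   a PDE pointing at pt_2m_ofs, whose level-1 entries map the last
     *   1 GiB as 2 MiB pages.  At most 512 such PTEs (512 * 8 = 4096 bytes)
     *   are needed, so one reserved 4 KiB page table per identity map is
     *   always enough.  The tail being described purely with 2 MiB pages is
     *   also why the code asserts that usable_size is 2 MiB aligned.
     */
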
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
@@ -127,9 +188,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
u8 id = tile->id;
u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
+#define VRAM_IDENTITY_MAP_COUNT 2
+ u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
+#undef VRAM_IDENTITY_MAP_COUNT
u32 map_ofs, level, i;
struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
- u64 entry;
+ u64 entry, pt29_ofs;
/* Can't bump NUM_PT_SLOTS too high */
BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
@@ -144,15 +208,18 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo))
return PTR_ERR(bo);
- entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
+ /* PT30 & PT31 reserved for 2M identity map */
+ pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
+ entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
- map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
+ map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
/* Map the entire BO in our level 0 pt */
for (i = 0, level = 0; i < num_entries; level++) {
@@ -201,12 +268,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
if (xe->info.has_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
}
}
@@ -216,15 +283,15 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
- entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+ entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
XE_PAGE_SIZE, pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
}
/* Write PDE's that point to our BO. */
- for (i = 0; i < num_entries - num_level; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+ for (i = 0; i < map_ofs / PAGE_SIZE; i++) {
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
@@ -240,29 +307,36 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
- u64 pos, ofs, flags;
+ u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
- level = 2;
- ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
- flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
- true, 0);
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
+ pat_index, pt30_ofs);
+ xe_assert(xe, xe->mem.vram.actual_physical_size <=
+ (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
- * Use 1GB pages, it shouldn't matter the physical amount of
- * vram is less, when we don't access it.
+ * Identity map the entire vram for compressed pat_index for xe2+
+ * if flat ccs is enabled.
*/
- for (pos = xe->mem.vram.dpa_base;
- pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
- pos += SZ_1G, ofs += 8)
- xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
+ u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
+ u64 vram_offset = IDENTITY_OFFSET +
+ DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
+
+ xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
+ IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
+ comp_pat_index, pt31_ofs);
+ }
}
/*
* Example layout created above, with root level = 3:
* [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
* [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
- * [PT9...PT28]: Userspace PT's for VM_BIND, 4 KiB PTE's
- * [PT29 = PDE 0] [PT30 = PDE 1] [PT31 = PDE 2]
+ * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
+ * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
*
* This makes the lowest part of the VM point to the pagetables.
* Hence the lowest 2M in the vm should point to itself, with a few writes
@@ -280,7 +354,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
drm_suballoc_manager_init(&m->vm_update_sa,
- (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+ (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
NUM_VMUSA_UNIT_PER_PAGE, 0);
m->pt_bo = bo;
@@ -288,10 +362,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
}
/*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
* Including the reserved copy engine is required to avoid deadlocks due to
* migrate jobs servicing the faults gets stuck behind the job that faulted.
*/
@@ -305,14 +375,18 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
if (hwe->class != XE_ENGINE_CLASS_COPY)
continue;
- if (!XE_WA(gt, 16017236439) ||
- xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+ if (xe_gt_is_usm_hwe(gt, hwe))
logical_mask |= BIT(hwe->logical_instance);
}
return logical_mask;
}
+static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
+{
+ return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
+}
+
/**
* xe_migrate_init() - Initialize a migrate context
* @tile: Back-pointer to the tile we're initializing for.
@@ -357,15 +431,19 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (!hwe || !logical_mask)
return ERR_PTR(-EINVAL);
+ /*
+ * XXX: Currently only reserving 1 (likely slow) BCS instance on
+ * PVC, may want to revisit if performance is needed.
+ */
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
- EXEC_QUEUE_FLAG_HIGH_PRIORITY);
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
} else {
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT);
+ EXEC_QUEUE_FLAG_PERMANENT, 0);
}
if (IS_ERR(m->q)) {
xe_vm_close_and_put(vm);
@@ -373,13 +451,16 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
}
mutex_init(&m->job_mutex);
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&m->job_mutex);
+ fs_reclaim_release(GFP_KERNEL);
err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
if (err)
return ERR_PTR(err);
if (IS_DGFX(xe)) {
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
/* min chunk size corresponds to 4K of CCS Metadata */
m->min_chunk_size = SZ_4K * SZ_64K /
xe_device_ccs_bytes(xe, SZ_64K);
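
The fs_reclaim_acquire()/might_lock()/fs_reclaim_release() triplet added above is the usual lockdep-priming pattern: it records up front that job_mutex may be taken under memory reclaim, so inversions get reported at init time rather than on a rare eviction path. The min_chunk_size formula can be sanity-checked with a small sketch (the 1:256 flat-CCS ratio is an assumption for illustration; the real value comes from xe_device_ccs_bytes()):

    #include <assert.h>

    #define SZ_4K                      4096ULL
    #define SZ_64K                     65536ULL
    #define ASSUMED_CCS_BYTES_PER_64K  256ULL    /* assumed 1:256 flat-CCS ratio */

    /* 4 KiB of CCS metadata then corresponds to 1 MiB of main memory. */
    static_assert(SZ_4K * SZ_64K / ASSUMED_CCS_BYTES_PER_64K == (1ULL << 20),
                  "min_chunk_size would be 1 MiB at a 1:256 ratio");
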
@@ -433,31 +514,37 @@ static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
return cur->size >= size;
}
+#define PTE_UPDATE_FLAG_IS_VRAM BIT(0)
+#define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1)
+
static u32 pte_update_size(struct xe_migrate *m,
- bool is_vram,
+ u32 flags,
struct ttm_resource *res,
struct xe_res_cursor *cur,
u64 *L0, u64 *L0_ofs, u32 *L0_pt,
u32 cmd_size, u32 pt_ofs, u32 avail_pts)
{
u32 cmds = 0;
+ bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
+ bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
*L0_pt = pt_ofs;
if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
/* Offset into identity map. */
*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
- cur->start + vram_region_gpu_offset(res));
+ cur->start + vram_region_gpu_offset(res),
+ is_comp_pte);
cmds += cmd_size;
} else {
/* Clip L0 to available size */
u64 size = min(*L0, (u64)avail_pts * SZ_2M);
- u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
*L0 = size;
*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
/* MI_STORE_DATA_IMM */
- cmds += 3 * DIV_ROUND_UP(num_4k_pages, 0x1ff);
+ cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
/* PDE qwords */
cmds += num_4k_pages * 2;
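
For the non-identity path, the command count works out as follows (a worked example, not code from the patch, assuming one full 2 MiB chunk):

    /*
     * num_4k_pages = 2 MiB / 4 KiB = 512
     *
     *   MI_STORE_DATA_IMM headers: DIV_ROUND_UP(512, 510) = 2 -> 2 * 3 =    6 dwords
     *   PTE payload:               512 qwords                 -> 512 * 2 = 1024 dwords
     *   plus cmd_size dwords for the copy/clear command itself.
     *
     * A chunk of at most 510 pages fits in a single MI_STORE_DATA_IMM
     * (3 + 2 * pages dwords); a full 2 MiB chunk spills into a second one.
     */
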
@@ -479,7 +566,7 @@ static void emit_pte(struct xe_migrate *m,
struct xe_vm *vm = m->q->vm;
u16 pat_index;
u32 ptes;
- u64 ofs = at_pt * XE_PAGE_SIZE;
+ u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
/* Indirect access needs compression enabled uncached PAT index */
@@ -492,7 +579,7 @@ static void emit_pte(struct xe_migrate *m,
ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
while (ptes) {
- u32 chunk = min(0x1ffU, ptes);
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = ofs;
@@ -605,12 +692,6 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
bb->cs[bb->len++] = upper_32_bits(src_ofs);
}
-static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
- enum dma_resv_usage usage)
-{
- return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
-}
-
static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
{
return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
@@ -625,7 +706,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
u32 flush_flags = 0;
- if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
+ if (!copy_ccs && dst_is_indirect) {
/*
* If the src is already in vram, then it should already
* have been cleared by us, or has been populated by the
@@ -701,6 +782,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
+ bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
@@ -727,10 +810,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
struct xe_sched_job *job;
struct xe_bb *bb;
- u32 flush_flags;
+ u32 flush_flags = 0;
u32 update_idx;
u64 ccs_ofs, ccs_size;
u32 ccs_pt;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
@@ -743,17 +827,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
src_L0 = min(src_L0, dst_L0);
- batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
+ pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
&src_L0_ofs, &src_L0_pt, 0, 0,
avail_pts);
- batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
+ pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
&dst_L0_ofs, &dst_L0_pt, 0,
avail_pts, avail_pts);
if (copy_system_ccs) {
ccs_size = xe_device_ccs_bytes(xe, src_L0);
- batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
2 * avail_pts,
avail_pts);
@@ -762,7 +849,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -791,13 +878,13 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
- IS_DGFX(xe) ? src_is_vram : src_is_pltt,
- dst_L0_ofs,
- IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
- src_L0, ccs_ofs, copy_ccs);
+ if (xe_migrate_needs_ccs_emit(xe))
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ IS_DGFX(xe) ? src_is_vram : src_is_pltt,
+ dst_L0_ofs,
+ IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
+ src_L0, ccs_ofs, copy_ccs);
- mutex_lock(&m->job_mutex);
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
@@ -808,15 +895,16 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, flush_flags);
if (!fence) {
- err = job_add_deps(job, src_bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
+ err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
if (!err && src_bo != dst_bo)
- err = job_add_deps(job, dst_bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
+ err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
if (err)
goto err_job;
}
+ mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
dma_fence_put(fence);
fence = dma_fence_get(&job->drm.s_fence->finished);
@@ -834,7 +922,6 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
err_job:
xe_sched_job_put(job);
err:
- mutex_unlock(&m->job_mutex);
xe_bb_free(bb, NULL);
err_sync:
@@ -924,8 +1011,8 @@ static bool has_service_copy_support(struct xe_gt *gt)
* all of the actual service copy engines (BCS1-BCS8) have been fused
* off.
*/
- return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
- XE_HW_ENGINE_BCS1);
+ return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
+ XE_HW_ENGINE_BCS1);
}
static u32 emit_clear_cmd_len(struct xe_gt *gt)
@@ -951,9 +1038,11 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
* @m: The migration context.
* @bo: The buffer object @dst is currently bound to.
* @dst: The dst TTM resource to be cleared.
+ * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
*
- * Clear the contents of @dst to zero. On flat CCS devices,
- * the CCS metadata is cleared to zero as well on VRAM destinations.
+ * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
+ * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
+ * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
* TODO: Eliminate the @bo argument.
*
* Return: Pointer to a dma_fence representing the last clear batch, or
@@ -962,18 +1051,26 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
*/
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_bo *bo,
- struct ttm_resource *dst)
+ struct ttm_resource *dst,
+ u32 clear_flags)
{
bool clear_vram = mem_type_is_vram(dst->mem_type);
+ bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
+ bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
- bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
+ bool clear_only_system_ccs = false;
struct dma_fence *fence = NULL;
u64 size = bo->size;
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
- int pass = 0;
+
+ if (WARN_ON(!clear_bo_data && !clear_ccs))
+ return NULL;
+
+ if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
+ clear_only_system_ccs = true;
if (!clear_vram)
xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
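
A hypothetical caller sketch (not taken from the patch; `m` and `bo` are assumed to be in scope) showing a synchronous full clear with the new flags:

    struct dma_fence *fence;

    fence = xe_migrate_clear(m, bo, bo->ttm.resource,
                             XE_MIGRATE_CLEAR_FLAG_FULL);
    if (IS_ERR(fence))
        return PTR_ERR(fence);
    /* Callers may also chain the fence instead of waiting here. */
    dma_fence_wait(fence, false);
    dma_fence_put(fence);
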
@@ -988,22 +1085,22 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_sched_job *job;
struct xe_bb *bb;
u32 batch_size, update_idx;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
clear_L0 = xe_migrate_res_sizes(m, &src_it);
- drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
-
/* Calculate final sizes and batch size.. */
+ pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
batch_size = 2 +
- pte_update_size(m, clear_vram, src, &src_it,
+ pte_update_size(m, pte_flags, src, &src_it,
&clear_L0, &clear_L0_ofs, &clear_L0_pt,
- clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
+ clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
avail_pts);
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
batch_size += EMIT_COPY_CCS_DW;
/* Clear commands */
@@ -1022,22 +1119,21 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
xe_res_next(&src_it, clear_L0);
else
- emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+ emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
&src_it, clear_L0, dst);
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- if (!clear_system_ccs)
+ if (clear_bo_data)
emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
- if (xe_device_has_flat_ccs(xe)) {
+ if (xe_migrate_needs_ccs_emit(xe)) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
m->cleared_mem_ofs, false, clear_L0);
flush_flags = MI_FLUSH_DW_CCS;
}
- mutex_lock(&m->job_mutex);
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
@@ -1054,12 +1150,13 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
* fences, which are always tracked as
* DMA_RESV_USAGE_KERNEL.
*/
- err = job_add_deps(job, bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL);
+ err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL);
if (err)
goto err_job;
}
+ mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
dma_fence_put(fence);
fence = dma_fence_get(&job->drm.s_fence->finished);
@@ -1076,7 +1173,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
err_job:
xe_sched_job_put(job);
err:
- mutex_unlock(&m->job_mutex);
xe_bb_free(bb, NULL);
err_sync:
/* Sync partial copies if any. FIXME: job_mutex? */
@@ -1088,13 +1184,14 @@ err_sync:
return ERR_PTR(err);
}
- if (clear_system_ccs)
+ if (clear_ccs)
bo->ccs_cleared = true;
return fence;
}
static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
+ const struct xe_vm_pgtable_update_op *pt_op,
const struct xe_vm_pgtable_update *update,
struct xe_migrate_pt_update *pt_update)
{
@@ -1111,16 +1208,16 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
* This shouldn't be possible in practice.. might change when 16K
* pages are used. Hence the assert.
*/
- xe_tile_assert(tile, update->qwords <= 0x1ff);
+ xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
if (!ppgtt_ofs)
ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
xe_bo_addr(update->pt_bo, 0,
- XE_PAGE_SIZE));
+ XE_PAGE_SIZE), false);
do {
u64 addr = ppgtt_ofs + ofs * 8;
- chunk = min(update->qwords, 0x1ffU);
+ chunk = min(size, MAX_PTE_PER_SDI);
/* Ensure populatefn can do memset64 by aligning bb->cs */
if (!(bb->len & 1))
@@ -1129,8 +1226,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
- ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
- update);
+ if (pt_op->bind)
+ ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
+ else
+ ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
bb->len += chunk * 2;
ofs += chunk;
@@ -1155,114 +1256,58 @@ struct migrate_test_params {
static struct dma_fence *
xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
- struct xe_vm *vm, struct xe_bo *bo,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates, bool wait_vm,
struct xe_migrate_pt_update *pt_update)
{
XE_TEST_DECLARE(struct migrate_test_params *test =
to_migrate_test_params
(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
- struct dma_fence *fence;
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
int err;
- u32 i;
+ u32 i, j;
if (XE_TEST_ONLY(test && test->force_gpu))
return ERR_PTR(-ETIME);
- if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL))
- return ERR_PTR(-ETIME);
-
- if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP))
- return ERR_PTR(-ETIME);
-
if (ops->pre_commit) {
pt_update->job = NULL;
err = ops->pre_commit(pt_update);
if (err)
return ERR_PTR(err);
}
- for (i = 0; i < num_updates; i++) {
- const struct xe_vm_pgtable_update *update = &updates[i];
-
- ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
- update->ofs, update->qwords, update);
- }
-
- if (vm) {
- trace_xe_vm_cpu_bind(vm);
- xe_device_wmb(vm->xe);
- }
-
- fence = dma_fence_get_stub();
-
- return fence;
-}
-
-static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
-{
- struct dma_fence *fence;
- int i;
-
- for (i = 0; i < num_syncs; i++) {
- fence = syncs[i].fence;
- if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence->flags))
- return false;
- }
- if (q) {
- fence = xe_exec_queue_last_fence_get(q, vm);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- dma_fence_put(fence);
- return false;
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ const struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ for (j = 0; j < pt_op->num_entries; j++) {
+ const struct xe_vm_pgtable_update *update =
+ &pt_op->entries[j];
+
+ if (pt_op->bind)
+ ops->populate(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords,
+ update);
+ else
+ ops->clear(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords, update);
}
- dma_fence_put(fence);
}
- return true;
+ trace_xe_vm_cpu_bind(vm);
+ xe_device_wmb(vm->xe);
+
+ return dma_fence_get_stub();
}
-/**
- * xe_migrate_update_pgtables() - Pipelined page-table update
- * @m: The migrate context.
- * @vm: The vm we'll be updating.
- * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
- * @q: The exec queue to be used for the update or NULL if the default
- * migration engine is to be used.
- * @updates: An array of update descriptors.
- * @num_updates: Number of descriptors in @updates.
- * @syncs: Array of xe_sync_entry to await before updating. Note that waits
- * will block the engine timeline.
- * @num_syncs: Number of entries in @syncs.
- * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
- * pointers to callback functions and, if subclassed, private arguments to
- * those.
- *
- * Perform a pipelined page-table update. The update descriptors are typically
- * built under the same lock critical section as a call to this function. If
- * using the default engine for the updates, they will be performed in the
- * order they grab the job_mutex. If different engines are used, external
- * synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
- * touch the same page-table, which might be a higher-level page-directory.
- * If no pipelining is needed, then updates may be performed by the cpu.
- *
- * Return: A dma_fence that, when signaled, indicates the update completion.
- */
-struct dma_fence *
-xe_migrate_update_pgtables(struct xe_migrate *m,
- struct xe_vm *vm,
- struct xe_bo *bo,
- struct xe_exec_queue *q,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct xe_migrate_pt_update *pt_update)
+static struct dma_fence *
+__xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update,
+ struct xe_vm_pgtable_update_ops *pt_update_ops)
{
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
struct xe_tile *tile = m->tile;
@@ -1271,63 +1316,58 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_sched_job *job;
struct dma_fence *fence;
struct drm_suballoc *sa_bo = NULL;
- struct xe_vma *vma = pt_update->vma;
struct xe_bb *bb;
- u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 num_updates = 0, current_update = 0;
u64 addr;
int err = 0;
- bool usm = !q && xe->info.has_usm;
- bool first_munmap_rebind = vma &&
- vma->gpuva.flags & XE_VMA_FIRST_REBIND;
- struct xe_exec_queue *q_override = !q ? m->q : q;
- u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ bool is_migrate = pt_update_ops->q == m->q;
+ bool usm = is_migrate && xe->info.has_usm;
+
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ num_updates += pt_op->num_entries;
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
+ MAX_PTE_PER_SDI);
- /* Use the CPU if no in syncs and engine is idle */
- if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
- fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
- num_updates,
- first_munmap_rebind,
- pt_update);
- if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
- return fence;
+ /* align noop + MI_STORE_DATA_IMM cmd prefix */
+ batch_size += 4 * num_cmds + updates[j].qwords * 2;
+ }
}
/* fixed + PTE entries */
if (IS_DGFX(xe))
- batch_size = 2;
+ batch_size += 2;
else
- batch_size = 6 + num_updates * 2;
-
- for (i = 0; i < num_updates; i++) {
- u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, 0x1ff);
+ batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
+ num_updates * 2;
- /* align noop + MI_STORE_DATA_IMM cmd prefix */
- batch_size += 4 * num_cmds + updates[i].qwords * 2;
- }
-
- /*
- * XXX: Create temp bo to copy from, if batch_size becomes too big?
- *
- * Worst case: Sum(2 * (each lower level page size) + (top level page size))
- * Should be reasonably bound..
- */
- xe_tile_assert(tile, batch_size < SZ_128K);
-
- bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
+ bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb))
return ERR_CAST(bb);
/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ u32 ptes, ofs;
+
ppgtt_ofs = NUM_KERNEL_PDE - 1;
- if (q) {
- xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
+ if (!is_migrate) {
+ u32 num_units = DIV_ROUND_UP(num_updates,
+ NUM_VMUSA_WRITES_PER_UNIT);
- sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
+ if (num_units > m->vm_update_sa.size) {
+ err = -ENOBUFS;
+ goto err_bb;
+ }
+ sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
GFP_KERNEL, true, 0);
if (IS_ERR(sa_bo)) {
err = PTR_ERR(sa_bo);
- goto err;
+ goto err_bb;
}
ppgtt_ofs = NUM_KERNEL_PDE +
@@ -1339,18 +1379,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
}
/* Map our PT's to gtt */
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
- bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
- bb->cs[bb->len++] = 0; /* upper_32_bits */
-
- for (i = 0; i < num_updates; i++) {
- struct xe_bo *pt_bo = updates[i].pt_bo;
+ i = 0;
+ j = 0;
+ ptes = num_updates;
+ ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
+ while (ptes) {
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
+ u32 idx = 0;
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM |
+ MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = ofs;
+ bb->cs[bb->len++] = 0; /* upper_32_bits */
+
+ for (; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ if (idx == chunk)
+ goto next_cmd;
+
+ xe_tile_assert(tile, pt_bo->size == SZ_4K);
+
+ /* Map a PT at most once */
+ if (pt_bo->update_index < 0)
+ pt_bo->update_index = current_update;
+
+ addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
+ pat_index, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
- xe_tile_assert(tile, pt_bo->size == SZ_4K);
+ j = 0;
+ }
- addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
- bb->cs[bb->len++] = lower_32_bits(addr);
- bb->cs[bb->len++] = upper_32_bits(addr);
+next_cmd:
+ ptes -= chunk;
+ ofs += chunk * sizeof(u64);
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
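
The rewritten preamble above walks all page-table BOs touched by the bind, MAX_PTE_PER_SDI at a time, and stamps each BO with an update_index so the write_pgtable() pass below can find where it was mapped. Worked through with made-up numbers (not from the patch):

    /*
     * Example: 1000 page-table BOs to map, MAX_PTE_PER_SDI = 510.
     *
     *   chunk 0: SDI header (3 dwords) + 510 PTE qwords, update_index 0..509,
     *            ofs then advances by 510 * 8 bytes
     *   chunk 1: second SDI header + the remaining 490 PTE qwords,
     *            update_index 510..999
     *
     * write_pgtable() later addresses BO n at addr + n * XE_PAGE_SIZE,
     * n being that BO's update_index (each BO is mapped at most once).
     */
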
@@ -1358,66 +1429,57 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
- &updates[i], pt_update);
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ write_pgtable(tile, bb, addr +
+ pt_bo->update_index * XE_PAGE_SIZE,
+ pt_op, &updates[j], pt_update);
+ }
+ }
} else {
/* phys pages, no preamble required */
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, 0, &updates[i], pt_update);
- }
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
- if (!q)
- mutex_lock(&m->job_mutex);
+ for (j = 0; j < pt_op->num_entries; ++j)
+ write_pgtable(tile, bb, 0, pt_op, &updates[j],
+ pt_update);
+ }
+ }
- job = xe_bb_create_migration_job(q ?: m->q, bb,
+ job = xe_bb_create_migration_job(pt_update_ops->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
err = PTR_ERR(job);
- goto err_bb;
+ goto err_sa;
}
- /* Wait on BO move */
- if (bo) {
- err = job_add_deps(job, bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL);
- if (err)
- goto err_job;
- }
-
- /*
- * Munmap style VM unbind, need to wait for all jobs to be complete /
- * trigger preempts before moving forward
- */
- if (first_munmap_rebind) {
- err = job_add_deps(job, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- if (err)
- goto err_job;
- }
-
- err = xe_sched_job_last_fence_add_dep(job, vm);
- for (i = 0; !err && i < num_syncs; i++)
- err = xe_sync_entry_add_deps(&syncs[i], job);
-
- if (err)
- goto err_job;
-
if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
if (err)
goto err_job;
}
+ if (is_migrate)
+ mutex_lock(&m->job_mutex);
+
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- if (!q)
+ if (is_migrate)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, fence);
@@ -1427,16 +1489,48 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
err_job:
xe_sched_job_put(job);
+err_sa:
+ drm_suballoc_free(sa_bo, NULL);
err_bb:
- if (!q)
- mutex_unlock(&m->job_mutex);
xe_bb_free(bb, NULL);
-err:
- drm_suballoc_free(sa_bo, NULL);
return ERR_PTR(err);
}
/**
+ * xe_migrate_update_pgtables() - Pipelined page-table update
+ * @m: The migrate context.
+ * @pt_update: PT update arguments
+ *
+ * Perform a pipelined page-table update. The update descriptors are typically
+ * built under the same lock critical section as a call to this function. If
+ * using the default engine for the updates, they will be performed in the
+ * order they grab the job_mutex. If different engines are used, external
+ * synchronization is needed for overlapping updates to maintain page-table
+ * consistency. Note that the meaning of "overlapping" is that the updates
+ * touch the same page-table, which might be a higher-level page-directory.
+ * If no pipelining is needed, then updates may be performed by the cpu.
+ *
+ * Return: A dma_fence that, when signaled, indicates the update completion.
+ */
+struct dma_fence *
+xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update)
+
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
+ struct dma_fence *fence;
+
+ fence = xe_migrate_update_pgtables_cpu(m, pt_update);
+
+ /* -ETIME indicates a job is needed, anything else is legit error */
+ if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
+ return fence;
+
+ return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
+}
+
+/**
* xe_migrate_wait() - Complete all operations using the xe_migrate context
* @m: Migrate context to wait for.
*