author    | Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> | 2023-08-22 10:33:32 -0700
committer | Rodrigo Vivi <rodrigo.vivi@intel.com>                    | 2023-12-21 11:40:27 -0500
commit    | 1c66c0f391da32534cf143e6a0f6391776aa9bf8 (patch)
tree      | 9d3299852a7b8937db2db6f696ed9258e6133873 /drivers/gpu
parent    | 486b2ef2768222bb4210709ccf5443c3e381346e (diff)
drm/xe: fix submissions without vm
Kernel queues can submit privileged batches directly in GGTT, so they
don't always need a vm. The submission front-end already supports
creating and submitting jobs without a vm, but some parts of the
back-end assume the vm is always there. Fix this by handling a lack of
vm in the back-end as well.
v2: s/XE_BUG_ON/XE_WARN_ON, s/engine/exec_queue
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20230822173334.1664332-2-daniele.ceraolospurio@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
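For illustration, the back-end fix boils down to guarding every read of the optional vm pointer: a vm-less kernel submission must take the "no PPGTT, nothing to invalidate" path instead of dereferencing NULL. Below is a minimal, self-contained sketch of that guard pattern; the struct and function names are simplified stand-ins for illustration, not the real xe types.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the part of struct xe_vm the ring back-end reads. */
struct fake_vm {
        bool batch_invalidate_tlb;
};

/* Before the fix, the emit path read vm->batch_invalidate_tlb unconditionally,
 * which faults when a kernel queue submits a GGTT batch with no vm attached.
 * After the fix, a missing vm simply means "no PPGTT, no TLB flush needed". */
static bool needs_tlb_flush(const struct fake_vm *vm)
{
        return vm && vm->batch_invalidate_tlb;
}

int main(void)
{
        struct fake_vm vm = { .batch_invalidate_tlb = true };

        printf("with vm:    %d\n", needs_tlb_flush(&vm));  /* prints 1 */
        printf("without vm: %d\n", needs_tlb_flush(NULL)); /* prints 0 */
        return 0;
}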
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/xe/xe_guc_submit.c | 2
-rw-r--r-- | drivers/gpu/drm/xe/xe_ring_ops.c   | 8
-rw-r--r-- | drivers/gpu/drm/xe/xe_sched_job.c  | 3
3 files changed, 8 insertions, 5 deletions
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 55c7b13d15ec..87f2972b7c20 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1136,7 +1136,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
         ge->q = q;
         init_waitqueue_head(&ge->suspend_wait);

-        timeout = xe_vm_no_dma_fences(q->vm) ? MAX_SCHEDULE_TIMEOUT :
+        timeout = (q->vm && xe_vm_no_dma_fences(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
                   q->hwe->eclass->sched_props.job_timeout_ms;
         err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, NULL,
                             q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 9e23293ec4d3..2b4127ea1eab 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -213,7 +213,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
         u32 ppgtt_flag = get_ppgtt_flag(job);
         struct xe_vm *vm = job->q->vm;

-        if (vm->batch_invalidate_tlb) {
+        if (vm && vm->batch_invalidate_tlb) {
                 dw[i++] = preparser_disable(true);
                 i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
                                         seqno, true, dw, i);
@@ -273,13 +273,13 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
                         i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
         }

-        if (vm->batch_invalidate_tlb)
+        if (vm && vm->batch_invalidate_tlb)
                 i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
                                         seqno, true, dw, i);

         dw[i++] = preparser_disable(false);

-        if (!vm->batch_invalidate_tlb)
+        if (!vm || !vm->batch_invalidate_tlb)
                 i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
                                         seqno, dw, i);

@@ -318,7 +318,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
                 mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;

         /* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
-        i = emit_pipe_invalidate(mask_flags, vm->batch_invalidate_tlb, dw, i);
+        i = emit_pipe_invalidate(mask_flags, vm && vm->batch_invalidate_tlb, dw, i);

         /* hsdes: 1809175790 */
         if (has_aux_ccs(xe))
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index de2851d24c96..0479d059dc77 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -87,6 +87,9 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
         int i, j;
         u32 width;

+        /* only a kernel context can submit a vm-less job */
+        XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
+
         /* Migration and kernel engines have their own locking */
         if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
                           EXEC_QUEUE_FLAG_WA))) {
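The xe_guc_submit.c hunk applies the same idea to the scheduler timeout: a queue with no vm cannot be a long-running (no-dma-fence) context, so it falls back to the per-engine-class job timeout rather than disabling the timeout entirely. A minimal sketch of that decision follows; the types, names and values are simplified stand-ins, not the real xe structures.

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in; the real driver reads this from struct xe_vm and the
 * engine-class scheduling properties. */
struct fake_vm {
        bool no_dma_fences;        /* long-running context, no dma-fences */
};

/* Stand-in for the kernel's MAX_SCHEDULE_TIMEOUT ("never time out"). */
#define FAKE_MAX_SCHEDULE_TIMEOUT LONG_MAX

static long pick_job_timeout(const struct fake_vm *vm, long class_timeout_ms)
{
        /* A NULL vm (kernel GGTT submission) keeps the normal class timeout;
         * only a long-running vm disables the timeout. */
        return (vm && vm->no_dma_fences) ? FAKE_MAX_SCHEDULE_TIMEOUT : class_timeout_ms;
}

int main(void)
{
        struct fake_vm lr_vm = { .no_dma_fences = true };

        printf("long-running vm: %ld\n", pick_job_timeout(&lr_vm, 5000));
        printf("no vm:           %ld\n", pick_job_timeout(NULL, 5000));
        return 0;
}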