Diffstat (limited to 'drivers/gpu/drm/xe/xe_execlist.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 76
1 file changed, 50 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index dece2785933c..788f56b066b6 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -9,7 +9,6 @@
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
-#include "regs/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
@@ -18,6 +17,7 @@
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
@@ -45,8 +45,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
u32 ctx_id)
{
struct xe_gt *gt = hwe->gt;
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
@@ -59,7 +61,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
}
if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
- xe_mmio_write32(hwe->gt, RCU_MODE,
+ xe_mmio_write32(mmio, RCU_MODE,
_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
@@ -77,17 +79,19 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
*/
wmb();
- xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base),
xe_bo_ggtt_addr(hwe->hwsp));
- xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(gt, RING_MODE(hwe->mmio_base),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+ xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
+
+ xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
lower_32_bits(lrc_desc));
- xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
upper_32_bits(lrc_desc));
- xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_EXECLIST_CONTROL(hwe->mmio_base),
EL_CTRL_LOAD);
}
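
Note: RING_MODE and RCU_MODE written above are "masked" MMIO registers, where the upper 16 bits of a write select which of the lower 16 bits actually take effect; that is why two _MASKED_BIT_ENABLE() values can be OR'ed into the single ring_mode write. A minimal sketch of the convention, using illustrative stand-in macros rather than the driver's real definitions:

/* Illustrative stand-ins for the masked-register helpers assumed above. */
#define EXAMPLE_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define EXAMPLE_MASKED_BIT_ENABLE(a)		EXAMPLE_MASKED_FIELD((a), (a))
#define EXAMPLE_MASKED_BIT_DISABLE(a)		EXAMPLE_MASKED_FIELD((a), 0)

/* Bits whose mask half is clear are left untouched by the write, so
 * independent enables can be combined, as ring_mode does above:
 */
u32 ring_mode = EXAMPLE_MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE) |
		EXAMPLE_MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);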
@@ -110,7 +114,7 @@ static void __xe_execlist_port_start(struct xe_execlist_port *port,
port->last_ctx_id = 1;
}
- __start_lrc(port->hwe, exl->q->lrc, port->last_ctx_id);
+ __start_lrc(port->hwe, exl->q->lrc[0], port->last_ctx_id);
port->running_exl = exl;
exl->has_run = true;
}
@@ -124,14 +128,14 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
if (!port->running_exl)
return;
- xe_lrc_write_ring(&port->hwe->kernel_lrc, noop, sizeof(noop));
- __start_lrc(port->hwe, &port->hwe->kernel_lrc, 0);
+ xe_lrc_write_ring(port->lrc, noop, sizeof(noop));
+ __start_lrc(port->hwe, port->lrc, 0);
port->running_exl = NULL;
}
static bool xe_execlist_is_idle(struct xe_execlist_exec_queue *exl)
{
- struct xe_lrc *lrc = exl->q->lrc;
+ struct xe_lrc *lrc = exl->q->lrc[0];
return lrc->ring.tail == lrc->ring.old_tail;
}
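
Note: the q->lrc accesses in this diff change from '.' to '->' and from q->lrc to q->lrc[0], consistent with the exec queue's LRCs becoming an array of pointers instead of an inline array of structs. A hypothetical, condensed sketch of that layout, not the actual struct definition:

/* Hypothetical, condensed sketch implied by the '.'-to-'->' conversions:
 * the queue carries one LRC pointer per logical ring; the execlist
 * backend only ever submits through lrc[0].
 */
struct example_exec_queue {
	/* ... other members elided ... */
	struct xe_lrc *lrc[];	/* one pointer per logical ring context */
};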
@@ -169,8 +173,8 @@ static u64 read_execlist_status(struct xe_hw_engine *hwe)
struct xe_gt *gt = hwe->gt;
u32 hi, lo;
- lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
- hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
+ lo = xe_mmio_read32(&gt->mmio, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
+ hi = xe_mmio_read32(&gt->mmio, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
return lo | (u64)hi << 32;
}
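
Note: the hunk above also shows the usual split 64-bit register pattern, now going through &gt->mmio. A self-contained sketch of the shape, assuming the xe_mmio_read32(struct xe_mmio *, struct xe_reg) signature this diff converts callers to:

/* Sketch of the split-register read used above. The two 32-bit reads are
 * not atomic, so the combined value can be torn if the hardware updates
 * the register between them; callers must tolerate that.
 */
static u64 example_read_reg64(struct xe_mmio *mmio, struct xe_reg lo_reg,
			      struct xe_reg hi_reg)
{
	u32 lo = xe_mmio_read32(mmio, lo_reg);
	u32 hi = xe_mmio_read32(mmio, hi_reg);

	return lo | (u64)hi << 32;
}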
@@ -255,14 +259,22 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
{
struct drm_device *drm = &xe->drm;
struct xe_execlist_port *port;
- int i;
+ int i, err;
port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
- if (!port)
- return ERR_PTR(-ENOMEM);
+ if (!port) {
+ err = -ENOMEM;
+ goto err;
+ }
port->hwe = hwe;
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX, 0);
+ if (IS_ERR(port->lrc)) {
+ err = PTR_ERR(port->lrc);
+ goto err;
+ }
+
spin_lock_init(&port->lock);
for (i = 0; i < ARRAY_SIZE(port->active); i++)
INIT_LIST_HEAD(&port->active[i]);
@@ -278,16 +290,21 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
add_timer(&port->irq_fail);
return port;
+
+err:
+ return ERR_PTR(err);
}
void xe_execlist_port_destroy(struct xe_execlist_port *port)
{
- del_timer(&port->irq_fail);
+ timer_delete(&port->irq_fail);
/* Prevent an interrupt while we're destroying */
spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
port->hwe->irq_handler = NULL;
spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
+
+ xe_lrc_put(port->lrc);
}
static struct dma_fence *
@@ -300,13 +317,14 @@ execlist_run_job(struct drm_sched_job *drm_job)
q->ring_ops->emit_job(job);
xe_execlist_make_active(exl);
- return dma_fence_get(job->fence);
+ return job->fence;
}
static void execlist_job_free(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
+ xe_exec_queue_update_run_ticks(job->q);
xe_sched_job_put(job);
}
@@ -318,6 +336,15 @@ static const struct drm_sched_backend_ops drm_sched_ops = {
static int execlist_exec_queue_init(struct xe_exec_queue *q)
{
struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args args = {
+ .ops = &drm_sched_ops,
+ .num_rqs = 1,
+ .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
+ .hang_limit = XE_SCHED_HANG_LIMIT,
+ .timeout = XE_SCHED_JOB_TIMEOUT,
+ .name = q->hwe->name,
+ .dev = gt_to_xe(q->gt)->drm.dev,
+ };
struct xe_execlist_exec_queue *exl;
struct xe_device *xe = gt_to_xe(q->gt);
int err;
@@ -332,11 +359,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
exl->q = q;
- err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
- q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
- XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
- NULL, NULL, q->hwe->name,
- gt_to_xe(q->gt)->drm.dev);
+ err = drm_sched_init(&exl->sched, &args);
if (err)
goto err_free;
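
Note: the drm_sched_init() conversion above replaces a long positional argument list with a designated-initializer args struct. Members not named in the initializer (submit_wq, timeout_wq, score) are zero-initialized, which matches the NULLs the old call passed explicitly. Condensed from the hunk above:

/* Args-struct form, condensed from the hunk above. Unset members default
 * to zero/NULL, standing in for the NULLs of the old positional call.
 */
const struct drm_sched_init_args args = {
	.ops = &drm_sched_ops,
	.num_rqs = 1,
	.credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
	.hang_limit = XE_SCHED_HANG_LIMIT,
	.timeout = XE_SCHED_JOB_TIMEOUT,
	.name = q->hwe->name,
	.dev = gt_to_xe(q->gt)->drm.dev,
};

err = drm_sched_init(&exl->sched, &args);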
@@ -422,10 +445,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
/* NIY */
+ return 0;
}
static void execlist_exec_queue_resume(struct xe_exec_queue *q)