author     Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>  2024-05-17 13:43:07 -0700
committer  Lucas De Marchi <lucas.demarchi@intel.com>               2024-05-21 06:33:40 -0700
commit     6109f24f87d75122cf6de50901115cbee4285ce2 (patch)
tree       18ed178be1530bbac4e48a2b19add8240dda1f28 /drivers/gpu/drm/xe/xe_exec_queue.c
parent     f2f6b667c67daee6fe2c51b5cec3bb0f1b4c1ce0 (diff)
drm/xe: Add helper to accumulate exec queue runtime
Add a helper to accumulate per-client runtime of all its exec queues.
This is called every time a sched job is finished.

v2:
  - Use guc_exec_queue_free_job() and execlist_job_free() to accumulate
    runtime when the job is finished, since xe_sched_job_completed() is
    not a notification that the job finished (see the sketch after this
    message).
  - Stop trying to update runtime from xe_exec_queue_fini() - that is
    redundant and may happen after xef is closed, leading to a
    use-after-free.
  - Do not special-case the first timestamp read: the default LRC sets
    CTX_TIMESTAMP to zero, so even the first sample should be a valid
    one.
  - Handle the parallel submission case by multiplying the runtime by
    width.

v3: Update comments

Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240517204310.88854-6-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
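For illustration, the v2 note above implies a hook shaped roughly like the
following in the GuC backend (a minimal sketch, assuming the driver's
to_xe_sched_job() helper and the job->q backpointer; the real
guc_exec_queue_free_job() in xe_guc_submit.c carries additional teardown
logic):

    static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
    {
    	struct xe_sched_job *job = to_xe_sched_job(drm_job);

    	/*
    	 * Accumulate runtime at job-free time; per the v2 note,
    	 * xe_sched_job_completed() is not a completion notification.
    	 */
    	xe_exec_queue_update_runtime(job->q);

    	xe_sched_job_put(job);
    }

The execlist backend's execlist_job_free() would call the same helper at the
equivalent point.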
Diffstat (limited to 'drivers/gpu/drm/xe/xe_exec_queue.c')
 drivers/gpu/drm/xe/xe_exec_queue.c | 37 +++
 1 file changed, 37 insertions(+), 0 deletions(-)
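The helper added below leans on xe_lrc_update_timestamp(), introduced earlier
in this series. A rough sketch of its contract (illustrative only, not the
upstream body; the cached field and the xe_lrc_read_ctx_timestamp() accessor
are assumptions):

    u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
    {
    	/* Hand back the previously cached CTX_TIMESTAMP sample... */
    	*old_ts = lrc->ctx_timestamp;

    	/* ...then refresh the cache from the HW-saved value. */
    	lrc->ctx_timestamp = xe_lrc_read_ctx_timestamp(lrc);

    	return lrc->ctx_timestamp;
    }

Callers thus get both endpoints of the sampling interval from a single call
and can accumulate new_ts - old_ts.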
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 395de93579fa..fa6dc996eca8 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -769,6 +769,43 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
q->lrc[0].fence_ctx.next_seqno - 1;
}
+/**
+ * xe_exec_queue_update_runtime() - Update runtime for this exec queue from hw
+ * @q: The exec queue
+ *
+ * Update the timestamp saved by HW for this exec queue and accumulate
+ * the runtime calculated from the delta since the last update. In the
+ * multi-lrc case, only the first LRC is considered.
+ */
+void xe_exec_queue_update_runtime(struct xe_exec_queue *q)
+{
+ struct xe_file *xef;
+ struct xe_lrc *lrc;
+ u32 old_ts, new_ts;
+
+ /*
+ * Jobs that are run during driver load may use an exec_queue, but are
+ * not associated with a user xe file, so avoid accumulating busyness
+	 * for kernel-specific work.
+ */
+ if (!q->vm || !q->vm->xef)
+ return;
+
+ xef = q->vm->xef;
+
+ /*
+ * Only sample the first LRC. For parallel submission, all of them are
+ * scheduled together and we compensate that below by multiplying by
+ * width - this may introduce errors if that premise is not true and
+ * they don't exit 100% aligned. On the other hand, looping through
+	 * the LRCs and reading them at different times could also introduce
+ * errors.
+ */
+ lrc = &q->lrc[0];
+ new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
+ xef->runtime[q->class] += (new_ts - old_ts) * q->width;
+}
+
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
struct xe_exec_queue *eq = q, *next;
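One arithmetic detail in the accumulation above: new_ts - old_ts is evaluated
in u32, so the delta stays correct even when the 32-bit CTX_TIMESTAMP counter
wraps. A standalone illustration in plain C (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Samples straddling a wrap of a 32-bit counter. */
    	uint32_t old_ts = 0xfffffff0u;
    	uint32_t new_ts = 0x00000010u;

    	/* Modular u32 subtraction: (0x10 - 0xfffffff0) mod 2^32 == 0x20. */
    	uint32_t delta = new_ts - old_ts;

    	printf("delta = %u ticks\n", (unsigned)delta); /* prints 32 */
    	return 0;
    }

Multiplying the delta by q->width then scales the single sampled LRC to all
LRCs of a parallel submission, as the comment in the diff explains.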