Diffstat (limited to 'drivers/gpu/drm/xe/xe_trace.h')
-rw-r--r--  drivers/gpu/drm/xe/xe_trace.h | 416
 1 file changed, 126 insertions(+), 290 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 846f14507d5f..b4a3577df70c 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -12,8 +12,6 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
-#include "xe_bo.h"
-#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
@@ -22,110 +20,65 @@
#include "xe_sched_job.h"
#include "xe_vm.h"
+#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
+#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
+#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
+#define __dev_name_eq(q) __dev_name_gt((q)->gt)
+
DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence),
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence),
TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(xe))
__field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
+ __assign_str(dev);
__entry->fence = fence;
__entry->seqno = fence->seqno;
),
- TP_printk("fence=%p, seqno=%d",
- __entry->fence, __entry->seqno)
+ TP_printk("dev=%s, fence=%p, seqno=%d",
+ __get_str(dev), __entry->fence, __entry->seqno)
);
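
Every class touched by this patch adds the device name the same way: reserve a dynamic string with __string(), copy it in __assign_str(), and print it with __get_str(). A minimal, self-contained sketch of that pattern, with a made-up class name and value field:

	DECLARE_EVENT_CLASS(xe_example_with_dev,
		TP_PROTO(struct xe_device *xe, u32 value),
		TP_ARGS(xe, value),

		TP_STRUCT__entry(
			__string(dev, __dev_name_xe(xe))	/* variable-length string slot */
			__field(u32, value)
		),

		TP_fast_assign(
			__assign_str(dev);	/* copies the __dev_name_xe(xe) source recorded by __string() */
			__entry->value = value;
		),

		TP_printk("dev=%s, value=%u", __get_str(dev), __entry->value)
	);

The single-argument __assign_str() form takes its source from the expression given to __string(), which is why no second argument appears anywhere in this patch.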
DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence)
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence)
);
DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
xe_gt_tlb_invalidation_fence_work_func,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence)
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence)
);
DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence)
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence)
);
DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence)
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence)
);
DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence)
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence)
);
DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence)
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence)
);
DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
- TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(fence)
-);
-
-DECLARE_EVENT_CLASS(xe_bo,
- TP_PROTO(struct xe_bo *bo),
- TP_ARGS(bo),
-
- TP_STRUCT__entry(
- __field(size_t, size)
- __field(u32, flags)
- __field(struct xe_vm *, vm)
- ),
-
- TP_fast_assign(
- __entry->size = bo->size;
- __entry->flags = bo->flags;
- __entry->vm = bo->vm;
- ),
-
- TP_printk("size=%zu, flags=0x%02x, vm=%p",
- __entry->size, __entry->flags, __entry->vm)
-);
-
-DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
- TP_PROTO(struct xe_bo *bo),
- TP_ARGS(bo)
-);
-
-TRACE_EVENT(xe_bo_move,
- TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
- bool move_lacks_source),
- TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
- TP_STRUCT__entry(
- __field(struct xe_bo *, bo)
- __field(size_t, size)
- __field(u32, new_placement)
- __field(u32, old_placement)
- __array(char, device_id, 12)
- __field(bool, move_lacks_source)
- ),
-
- TP_fast_assign(
- __entry->bo = bo;
- __entry->size = bo->size;
- __entry->new_placement = new_placement;
- __entry->old_placement = old_placement;
- strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
- __entry->move_lacks_source = move_lacks_source;
- ),
- TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
- __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
- xe_mem_type_to_name[__entry->old_placement],
- xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
+ TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(xe, fence)
);
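
Since TP_PROTO for this class now takes the device, every caller has to pass it alongside the fence. A hypothetical call site (the function is made up; gt_to_xe() is the same helper __dev_name_gt() relies on):

	static void invalidation_fence_signal_sketch(struct xe_gt *gt,
						     struct xe_gt_tlb_invalidation_fence *fence)
	{
		/* The generated trace_..._signal() helper now takes the xe_device first. */
		trace_xe_gt_tlb_invalidation_fence_signal(gt_to_xe(gt), fence);
	}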
DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -133,6 +86,7 @@ DECLARE_EVENT_CLASS(xe_exec_queue,
TP_ARGS(q),
TP_STRUCT__entry(
+ __string(dev, __dev_name_eq(q))
__field(enum xe_engine_class, class)
__field(u32, logical_mask)
__field(u8, gt_id)
@@ -143,6 +97,7 @@ DECLARE_EVENT_CLASS(xe_exec_queue,
),
TP_fast_assign(
+ __assign_str(dev);
__entry->class = q->class;
__entry->logical_mask = q->logical_mask;
__entry->gt_id = q->gt->info.id;
@@ -152,8 +107,8 @@ DECLARE_EVENT_CLASS(xe_exec_queue,
__entry->flags = q->flags;
),
- TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
- __entry->class, __entry->logical_mask,
+ TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
+ __get_str(dev), __entry->class, __entry->logical_mask,
__entry->gt_id, __entry->width, __entry->guc_id,
__entry->guc_state, __entry->flags)
);
@@ -253,28 +208,35 @@ DECLARE_EVENT_CLASS(xe_sched_job,
TP_ARGS(job),
TP_STRUCT__entry(
+ __string(dev, __dev_name_eq(job->q))
__field(u32, seqno)
+ __field(u32, lrc_seqno)
+ __field(u8, gt_id)
__field(u16, guc_id)
__field(u32, guc_state)
__field(u32, flags)
__field(int, error)
- __field(u64, fence)
+ __field(struct dma_fence *, fence)
__field(u64, batch_addr)
),
TP_fast_assign(
+ __assign_str(dev);
__entry->seqno = xe_sched_job_seqno(job);
+ __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
+ __entry->gt_id = job->q->gt->info.id;
__entry->guc_id = job->q->guc->id;
__entry->guc_state =
atomic_read(&job->q->guc->state);
__entry->flags = job->q->flags;
- __entry->error = job->fence->error;
- __entry->fence = (unsigned long)job->fence;
- __entry->batch_addr = (u64)job->batch_addr[0];
+ __entry->error = job->fence ? job->fence->error : 0;
+ __entry->fence = job->fence;
+ __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
),
- TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
- __entry->fence, __entry->seqno, __entry->guc_id,
+ TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ __get_str(dev), __entry->fence, __entry->seqno,
+ __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
);
@@ -319,17 +281,22 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
TP_ARGS(msg),
TP_STRUCT__entry(
+ __string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
__field(u32, opcode)
__field(u16, guc_id)
+ __field(u8, gt_id)
),
TP_fast_assign(
+ __assign_str(dev);
__entry->opcode = msg->opcode;
__entry->guc_id =
((struct xe_exec_queue *)msg->private_data)->guc->id;
+ __entry->gt_id =
+ ((struct xe_exec_queue *)msg->private_data)->gt->info.id;
),
- TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
+ TP_printk("dev=%s, gt=%u guc_id=%d, opcode=%u", __get_str(dev), __entry->gt_id, __entry->guc_id,
__entry->opcode)
);
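
The msg->private_data casts above work because the scheduler message is queued with its exec queue stored there. A hypothetical enqueue path, assuming a trace_xe_sched_msg_add() event is defined from this class elsewhere in the header (outside the hunks shown):

	static void queue_msg_sketch(struct xe_exec_queue *q,
				     struct xe_sched_msg *msg, u32 opcode)
	{
		msg->opcode = opcode;
		msg->private_data = q;	/* recovered by the casts in TP_fast_assign above */
		trace_xe_sched_msg_add(msg);
	}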
@@ -348,19 +315,21 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(fence->xe))
__field(u64, ctx)
__field(u32, seqno)
__field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
+ __assign_str(dev);
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
__entry->fence = fence;
),
- TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
- __entry->ctx, __entry->fence, __entry->seqno)
+ TP_printk("dev=%s, ctx=0x%016llx, fence=%p, seqno=%u",
+ __get_str(dev), __entry->ctx, __entry->fence, __entry->seqno)
);
DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
@@ -378,247 +347,114 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
TP_ARGS(fence)
);
-DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
- TP_PROTO(struct xe_hw_fence *fence),
- TP_ARGS(fence)
-);
-
-DECLARE_EVENT_CLASS(xe_vma,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma),
-
- TP_STRUCT__entry(
- __field(struct xe_vma *, vma)
- __field(u32, asid)
- __field(u64, start)
- __field(u64, end)
- __field(u64, ptr)
- ),
-
- TP_fast_assign(
- __entry->vma = vma;
- __entry->asid = xe_vma_vm(vma)->usm.asid;
- __entry->start = xe_vma_start(vma);
- __entry->end = xe_vma_end(vma) - 1;
- __entry->ptr = xe_vma_userptr(vma);
- ),
-
- TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
- __entry->vma, __entry->asid, __entry->start,
- __entry->end, __entry->ptr)
-)
-
-DEFINE_EVENT(xe_vma, xe_vma_flush,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_pagefault,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
+TRACE_EVENT(xe_reg_rw,
+ TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
-DEFINE_EVENT(xe_vma, xe_vma_acc,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
+ TP_ARGS(mmio, write, reg, val, len),
-DEFINE_EVENT(xe_vma, xe_vma_fail,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_tile(mmio->tile))
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
+ ),
-DEFINE_EVENT(xe_vma, xe_vma_bind,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->val = val;
+ __entry->reg = reg;
+ __entry->write = write;
+ __entry->len = len;
+ ),
-DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
+ TP_printk("dev=%s, %s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __get_str(dev), __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
);
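
The register event takes the raw value as a u64 plus an explicit length so 32-bit and 64-bit accessors can share one event; TP_printk then splits the value into low and high halves. A hypothetical traced 32-bit read (the wrapper name and the ->regs iomem base are assumptions):

	static u32 mmio_read32_traced_sketch(struct xe_mmio *mmio, u32 reg_addr)
	{
		u32 val = readl(mmio->regs + reg_addr);	/* assumes an __iomem regs base */

		trace_xe_reg_rw(mmio, false, reg_addr, (u64)val, sizeof(val));
		return val;
	}

A 64-bit accessor would pass the full value and sizeof(u64); for 32-bit reads the high half printed above is simply zero.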
-DEFINE_EVENT(xe_vma, xe_vma_unbind,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_invalidate,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_evict,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
-DECLARE_EVENT_CLASS(xe_vm,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm),
+DECLARE_EVENT_CLASS(xe_pm_runtime,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller),
TP_STRUCT__entry(
- __field(struct xe_vm *, vm)
- __field(u32, asid)
+ __string(dev, __dev_name_xe(xe))
+ __field(void *, caller)
),
TP_fast_assign(
- __entry->vm = vm;
- __entry->asid = vm->usm.asid;
+ __assign_str(dev);
+ __entry->caller = caller;
),
- TP_printk("vm=%p, asid=0x%05x", __entry->vm,
- __entry->asid)
+ TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
);
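
The caller field is a code address formatted with %pS, so call sites typically record their own return address. A hypothetical wrapper around the runtime-PM get path (only the tracepoint name comes from this class; the rest is illustrative):

	void xe_pm_runtime_get_sketch(struct xe_device *xe)
	{
		/* Record who asked for the wakeref; %pS resolves this to a symbol name. */
		trace_xe_pm_runtime_get(xe, (void *)__builtin_return_address(0));
		pm_runtime_get_noresume(xe->drm.dev);
	}

The real helpers in xe_pm.c do more (resume handling, reference accounting); the sketch only shows where the tracepoint sits.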
-DEFINE_EVENT(xe_vm, xe_vm_kill,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
);
-DEFINE_EVENT(xe_vm, xe_vm_create,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
);
-DEFINE_EVENT(xe_vm, xe_vm_free,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
+DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
);
-DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
+DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
);
-DEFINE_EVENT(xe_vm, xe_vm_restart,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
);
-DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
);
-DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
);
-DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
- TP_PROTO(struct xe_vm *vm),
- TP_ARGS(vm)
-);
+TRACE_EVENT(xe_eu_stall_data_read,
+ TP_PROTO(u8 slice, u8 subslice,
+ u32 read_ptr, u32 write_ptr,
+ size_t read_size, size_t total_size),
+ TP_ARGS(slice, subslice,
+ read_ptr, write_ptr,
+ read_size, total_size),
-/* GuC */
-DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
- TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
- TP_ARGS(_head, _tail, size, space, len),
-
- TP_STRUCT__entry(
- __field(u32, _head)
- __field(u32, _tail)
- __field(u32, size)
- __field(u32, space)
- __field(u32, len)
+ TP_STRUCT__entry(__field(u8, slice)
+ __field(u8, subslice)
+ __field(u32, read_ptr)
+ __field(u32, write_ptr)
+ __field(size_t, read_size)
+ __field(size_t, total_size)
),
- TP_fast_assign(
- __entry->_head = _head;
- __entry->_tail = _tail;
- __entry->size = size;
- __entry->space = space;
- __entry->len = len;
+ TP_fast_assign(__entry->slice = slice;
+ __entry->subslice = subslice;
+ __entry->read_ptr = read_ptr;
+ __entry->write_ptr = write_ptr;
+ __entry->read_size = read_size;
+ __entry->total_size = total_size;
),
- TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
- __entry->_head, __entry->_tail, __entry->size,
- __entry->space, __entry->len)
-);
-
-DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
- TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
- TP_ARGS(_head, _tail, size, space, len)
-);
-
-DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
- TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
- TP_ARGS(_head, _tail, size, space, len),
-
- TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
- __entry->_head, __entry->_tail, __entry->size,
- __entry->space, __entry->len)
-);
-
-DECLARE_EVENT_CLASS(xe_guc_ctb,
- TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
- TP_ARGS(gt_id, action, len, _head, tail),
-
- TP_STRUCT__entry(
- __field(u8, gt_id)
- __field(u32, action)
- __field(u32, len)
- __field(u32, tail)
- __field(u32, _head)
- ),
-
- TP_fast_assign(
- __entry->gt_id = gt_id;
- __entry->action = action;
- __entry->len = len;
- __entry->tail = tail;
- __entry->_head = _head;
- ),
-
- TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
- __entry->gt_id, __entry->action, __entry->len,
- __entry->tail, __entry->_head)
-);
-
-DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
- TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
- TP_ARGS(gt_id, action, len, _head, tail)
-);
-
-DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
- TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
- TP_ARGS(gt_id, action, len, _head, tail),
-
- TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
- __entry->gt_id, __entry->action, __entry->len,
- __entry->tail, __entry->_head)
-
+ TP_printk("slice: %u subslice: %u read ptr: 0x%x write ptr: 0x%x read size: %zu total read size: %zu",
+ __entry->slice, __entry->subslice,
+ __entry->read_ptr, __entry->write_ptr,
+ __entry->read_size, __entry->total_size)
);
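
A hypothetical drain step for the EU stall buffer, showing what the pointers and sizes mean; everything except the tracepoint call, including the power-of-two ring math, is an illustrative assumption:

	static size_t eu_stall_drain_sketch(u8 slice, u8 subslice,
					    u32 read_ptr, u32 write_ptr,
					    size_t buf_size, size_t total_so_far)
	{
		/* Bytes available between the hardware write pointer and our read
		 * pointer, assuming buf_size is a power of two.
		 */
		size_t read_size = (write_ptr - read_ptr) & (buf_size - 1);

		trace_xe_eu_stall_data_read(slice, subslice, read_ptr, write_ptr,
					    read_size, total_so_far + read_size);
		return read_size;
	}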
#endif