author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-03 11:44:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-03 11:44:24 -0700
commit     2f34c1231bfc9f2550f934acb268ac7315fb3837 (patch)
tree       ff8114b3b4ec4723a11b041c6b74c389e9f0eeb9 /drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
parent     a3719f34fdb664ffcfaec2160ef20fca7becf2ee (diff)
parent     8b03d1ed2c43a2ba5ef3381322ee4515b97381bf (diff)
Merge tag 'drm-for-v4.12' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This is the main drm pull request for v4.12. Apart from two fixes
pulls, everything should have been in drm-next for at least 2 weeks.
The biggest thing in here is AMD released the public headers for their
upcoming VEGA GPUs. These as always are quite a sizeable chunk of
header files. They've also added initial non-display support for those
GPUs, though they aren't available in production yet.
Otherwise it's pretty much normal.
New bridge drivers:
- megachips-stdpxxxx-ge-b850v3-fw LVDS->DP++
- generic LVDS bridge support.
Core:
- Displayport link train failure reporting to userspace
- debugfs interface cleaned up
- subsystem TODO in kerneldoc now
- Extended fbdev support (flipping and vblank wait)
- drm_platform removed
- EDP CRC support in helper
- HF-VSDB SCDC support in EDID parser
- Lots of code cleanups and header extraction
- Thunderbolt external GPU awareness
- Atomic helper improvements
- Documentation improvements
panel:
- Sitronix and Samsung new panel support
amdgpu:
- Preliminary vega10 support
- Multi-level page table support
- GPU sensor support for userspace
- PRT support for sparse buffers
- SR-IOV improvements
- Non-contig VRAM CPU mapping
i915:
- Atomic modesetting enabled by default on Gen5+
- LSPCON improvements
- Atomic state handling for cdclk
- GPU reset improvements
- In-kernel unit tests
- Geminilake improvements and color manager support
- Designware i2c fixes
- vblank evasion improvements
- Hotplug safe connector iterators
- GVT scheduler QoS support
- GVT Kabylake support
nouveau:
- Acceleration support for Pascal (GP10x).
- Rearchitecture of code handling proprietary signed firmware
- Fix GTX 970 with odd MMU configuration
- GP10B support
- GP107 acceleration support
vmwgfx:
- Atomic modesetting support for vmwgfx
omapdrm:
- Support for render nodes
- Refactor omapdss code
- Fix some probe ordering issues
- Fix too dark RGB565 rendering
sunxi:
- prelim rework for multiple pipes.
mali-dp:
- Color management support
- Plane scaling
- Power management improvements
imx-drm:
- Prefetch Resolve Engine/Gasket on i.MX6QP
- Deferred plane disabling
- Separate alpha support
mediatek:
- Mediatek SoC MT2701 support
rcar-du:
- Gen3 HDMI support
msm:
- 4k support for newer chips
- OPP bindings for gpu
- prep work for per-process pagetables
vc4:
- HDMI audio support
- fixes
qxl:
- minor fixes.
dw-hdmi:
- PHY improvements
- CSC fixes
- Amlogic GX SoC support"
* tag 'drm-for-v4.12' of git://people.freedesktop.org/~airlied/linux: (1778 commits)
drm/nouveau/fb/gf100-: Fix 32 bit wraparound in new ram detection
drm/nouveau/secboot/gm20b: fix the error return code in gm20b_secboot_tegra_read_wpr()
drm/nouveau/kms: Increase max retries in scanout position queries.
drm/nouveau/bios/bitP: check that table is long enough for optional pointers
drm/nouveau/fifo/nv40: no ctxsw for pre-nv44 mpeg engine
drm: mali-dp: use div_u64 for expensive 64-bit divisions
drm/i915: Confirm the request is still active before adding it to the await
drm/i915: Avoid busy-spinning on VLV_GLTC_PW_STATUS mmio
drm/i915/selftests: Allocate inode/file dynamically
drm/i915: Fix system hang with EI UP masked on Haswell
drm/i915: checking for NULL instead of IS_ERR() in mock selftests
drm/i915: Perform link quality check unconditionally during long pulse
drm/i915: Fix use after free in lpe_audio_platdev_destroy()
drm/i915: Use the right mapping_gfp_mask for final shmem allocation
drm/i915: Make legacy cursor updates more unsynced
drm/i915: Apply a cond_resched() to the saturated signaler
drm/i915: Park the signaler before sleeping
drm: mali-dp: Check the mclk rate and allow up/down scaling
drm: mali-dp: Enable image enhancement when scaling
drm: mali-dp: Add plane upscaling support
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 106
1 file changed, 74 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index a18ae1e97860..ee9d0f346d75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -11,6 +11,9 @@
 #define TRACE_SYSTEM amdgpu
 #define TRACE_INCLUDE_FILE amdgpu_trace
 
+#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
+	 job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
+
 TRACE_EVENT(amdgpu_mm_rreg,
 	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
 	    TP_ARGS(did, reg, value),
@@ -49,6 +52,43 @@ TRACE_EVENT(amdgpu_mm_wreg,
 		      (unsigned long)__entry->value)
 );
 
+TRACE_EVENT(amdgpu_iv,
+	    TP_PROTO(struct amdgpu_iv_entry *iv),
+	    TP_ARGS(iv),
+	    TP_STRUCT__entry(
+			     __field(unsigned, client_id)
+			     __field(unsigned, src_id)
+			     __field(unsigned, ring_id)
+			     __field(unsigned, vm_id)
+			     __field(unsigned, vm_id_src)
+			     __field(uint64_t, timestamp)
+			     __field(unsigned, timestamp_src)
+			     __field(unsigned, pas_id)
+			     __array(unsigned, src_data, 4)
+			    ),
+	    TP_fast_assign(
+			   __entry->client_id = iv->client_id;
+			   __entry->src_id = iv->src_id;
+			   __entry->ring_id = iv->ring_id;
+			   __entry->vm_id = iv->vm_id;
+			   __entry->vm_id_src = iv->vm_id_src;
+			   __entry->timestamp = iv->timestamp;
+			   __entry->timestamp_src = iv->timestamp_src;
+			   __entry->pas_id = iv->pas_id;
+			   __entry->src_data[0] = iv->src_data[0];
+			   __entry->src_data[1] = iv->src_data[1];
+			   __entry->src_data[2] = iv->src_data[2];
+			   __entry->src_data[3] = iv->src_data[3];
+			   ),
+	    TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
+		      __entry->client_id, __entry->src_id,
+		      __entry->ring_id, __entry->vm_id,
+		      __entry->timestamp, __entry->pas_id,
+		      __entry->src_data[0], __entry->src_data[1],
+		      __entry->src_data[2], __entry->src_data[3])
+);
+
+
 TRACE_EVENT(amdgpu_bo_create,
 	    TP_PROTO(struct amdgpu_bo *bo),
 	    TP_ARGS(bo),
@@ -70,7 +110,7 @@ TRACE_EVENT(amdgpu_bo_create,
 			   __entry->visible = bo->flags;
 			   ),
 
-	    TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
+	    TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d",
 		       __entry->bo, __entry->pages, __entry->type,
 		       __entry->prefer, __entry->allow, __entry->visible)
 );
@@ -101,50 +141,51 @@ TRACE_EVENT(amdgpu_cs_ioctl,
 	    TP_PROTO(struct amdgpu_job *job),
 	    TP_ARGS(job),
 	    TP_STRUCT__entry(
-			     __field(struct amdgpu_device *, adev)
-			     __field(struct amd_sched_job *, sched_job)
-			     __field(struct amdgpu_ib *, ib)
+			     __field(uint64_t, sched_job_id)
+			     __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+			     __field(unsigned int, context)
+			     __field(unsigned int, seqno)
 			     __field(struct dma_fence *, fence)
 			     __field(char *, ring_name)
 			     __field(u32, num_ibs)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->adev = job->adev;
-			   __entry->sched_job = &job->base;
-			   __entry->ib = job->ibs;
-			   __entry->fence = &job->base.s_fence->finished;
+			   __entry->sched_job_id = job->base.id;
+			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+			   __entry->context = job->base.s_fence->finished.context;
+			   __entry->seqno = job->base.s_fence->finished.seqno;
 			   __entry->ring_name = job->ring->name;
 			   __entry->num_ibs = job->num_ibs;
 			   ),
-	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
-		      __entry->adev, __entry->sched_job, __entry->ib,
-		      __entry->fence, __entry->ring_name, __entry->num_ibs)
+	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+		      __entry->sched_job_id, __get_str(timeline), __entry->context,
+		      __entry->seqno, __entry->ring_name, __entry->num_ibs)
 );
 
 TRACE_EVENT(amdgpu_sched_run_job,
 	    TP_PROTO(struct amdgpu_job *job),
 	    TP_ARGS(job),
 	    TP_STRUCT__entry(
-			     __field(struct amdgpu_device *, adev)
-			     __field(struct amd_sched_job *, sched_job)
-			     __field(struct amdgpu_ib *, ib)
-			     __field(struct dma_fence *, fence)
+			     __field(uint64_t, sched_job_id)
+			     __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+			     __field(unsigned int, context)
+			     __field(unsigned int, seqno)
 			     __field(char *, ring_name)
 			     __field(u32, num_ibs)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->adev = job->adev;
-			   __entry->sched_job = &job->base;
-			   __entry->ib = job->ibs;
-			   __entry->fence = &job->base.s_fence->finished;
+			   __entry->sched_job_id = job->base.id;
+			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+			   __entry->context = job->base.s_fence->finished.context;
+			   __entry->seqno = job->base.s_fence->finished.seqno;
 			   __entry->ring_name = job->ring->name;
 			   __entry->num_ibs = job->num_ibs;
 			   ),
-	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
-		      __entry->adev, __entry->sched_job, __entry->ib,
-		      __entry->fence, __entry->ring_name, __entry->num_ibs)
+	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+		      __entry->sched_job_id, __get_str(timeline), __entry->context,
+		      __entry->seqno, __entry->ring_name, __entry->num_ibs)
 );
 
@@ -184,9 +225,9 @@ TRACE_EVENT(amdgpu_vm_bo_map,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->bo = bo_va->bo;
-			   __entry->start = mapping->it.start;
-			   __entry->last = mapping->it.last;
+			   __entry->bo = bo_va ? bo_va->bo : NULL;
+			   __entry->start = mapping->start;
+			   __entry->last = mapping->last;
 			   __entry->offset = mapping->offset;
 			   __entry->flags = mapping->flags;
 			   ),
@@ -209,8 +250,8 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 
 	    TP_fast_assign(
 			   __entry->bo = bo_va->bo;
-			   __entry->start = mapping->it.start;
-			   __entry->last = mapping->it.last;
+			   __entry->start = mapping->start;
+			   __entry->last = mapping->last;
 			   __entry->offset = mapping->offset;
 			   __entry->flags = mapping->flags;
 			   ),
@@ -229,8 +270,8 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->soffset = mapping->it.start;
-			   __entry->eoffset = mapping->it.last + 1;
+			   __entry->soffset = mapping->start;
+			   __entry->eoffset = mapping->last + 1;
 			   __entry->flags = mapping->flags;
 			   ),
 	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
@@ -321,7 +362,7 @@ TRACE_EVENT(amdgpu_bo_list_set,
 			   __entry->bo = bo;
 			   __entry->bo_size = amdgpu_bo_size(bo);
 			   ),
-	    TP_printk("list=%p, bo=%p, bo_size = %Ld",
+	    TP_printk("list=%p, bo=%p, bo_size=%Ld",
 		      __entry->list,
 		      __entry->bo,
 		      __entry->bo_size)
@@ -339,7 +380,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
 			   __entry->total_bo = total_bo;
 			   __entry->total_size = total_size;
 			   ),
-	    TP_printk("total bo size = %Ld, total bo count = %Ld",
+	    TP_printk("total_bo_size=%Ld, total_bo_count=%Ld",
 		      __entry->total_bo, __entry->total_size)
 );
 
@@ -359,11 +400,12 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
 			   __entry->new_placement = new_placement;
 			   __entry->old_placement = old_placement;
 			   ),
-	    TP_printk("bo=%p from:%d to %d with size = %Ld",
+	    TP_printk("bo=%p, from=%d, to=%d, size=%Ld",
 			__entry->bo, __entry->old_placement,
 			__entry->new_placement, __entry->bo_size)
 );
 
+#undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
 
 /* This part must be outside protection */
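For readers less familiar with the ftrace side of this change: the rewritten amdgpu_cs_ioctl and amdgpu_sched_run_job events stop recording raw kernel pointers and instead capture a stable job id, the fence context/seqno, and the timeline name copied into the ring buffer via the __string()/__assign_str() helpers. The listing below is a minimal, hypothetical trace-header sketch (event and file names such as myring_submit and myring_trace are invented for illustration and are not part of this patch) showing that same pattern; it assumes it is built inside a kernel tree with the usual include/trace plumbing.

/*
 * Hypothetical example, not from this patch: a minimal trace header using
 * the __string()/__assign_str() pattern the amdgpu events above adopt.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM myring

#if !defined(_MYRING_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _MYRING_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(myring_submit,
	    TP_PROTO(u64 job_id, const char *timeline, u32 seqno),
	    TP_ARGS(job_id, timeline, seqno),
	    TP_STRUCT__entry(
			     __field(u64, job_id)
			     /* reserves space for a copy of the string */
			     __string(timeline, timeline)
			     __field(u32, seqno)
			     ),
	    TP_fast_assign(
			   __entry->job_id = job_id;
			   /* copies the string into the ring buffer at trace time */
			   __assign_str(timeline, timeline);
			   __entry->seqno = seqno;
			   ),
	    TP_printk("job=%llu, timeline=%s, seqno=%u",
		      __entry->job_id, __get_str(timeline), __entry->seqno)
);

#endif /* _MYRING_TRACE_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE myring_trace
#include <trace/define_trace.h>

Once such a header is compiled in, the event typically appears under /sys/kernel/debug/tracing/events/ (or /sys/kernel/tracing/events/), just as the amdgpu events above do; enabling, say, events/amdgpu/amdgpu_cs_ioctl then records the new sched_job/timeline/context/seqno fields rather than kernel pointers.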