author     Antonino Maniscalco <antomani103@gmail.com>   2024-10-03 18:12:55 +0200
committer  Rob Clark <robdclark@chromium.org>             2024-10-03 13:21:52 -0700
commit     e7ae83da4a289d4bf3b0fb62aadbe8c81c0dbde7
tree       5649810e7b932b69fe73d57dee67963968f043df /drivers/gpu/drm/msm/msm_ringbuffer.h
parent     91389b4e3263eaa8549f20d73beeed77f5616f4c
drm/msm/a6xx: Implement preemption for a7xx targets
This patch implements the preemption feature for a7xx targets; it allows the GPU to switch to a higher-priority ringbuffer if one is ready. A6xx-family hardware supports multiple levels of preemption granularity, ranging from coarse-grained (ringbuffer level) to finer-grained preemption at draw-call or bin-boundary level. This patch enables the basic ringbuffer-level preemption, with finer-grained preemption support to follow.

Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
Tested-by: Rob Clark <robdclark@gmail.com>
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8650-QRD
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8550-QRD
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8450-HDK
Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
Signed-off-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/618021/
Signed-off-by: Rob Clark <robdclark@chromium.org>
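For context, ringbuffer-level preemption means the hardware can be asked to stop executing its current ring and resume the highest-priority ring that has pending work. The following is a minimal standalone sketch of that selection policy, not the driver's actual code: the struct and function names (ring, ring_has_work, pick_next_ring) are hypothetical, and rings are assumed to be ordered from highest to lowest priority.

    /*
     * Standalone model (illustrative only): rings[0] is the highest priority
     * ring. A preemption decision picks the first ring with unconsumed
     * commands and switches to it if it is not already the active one.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NR_RINGS 4

    struct ring {
            int id;
            unsigned int wptr;      /* where the CPU has written commands to */
            unsigned int rptr;      /* where the GPU has read commands up to */
    };

    /* A ring is "ready" when it still has unconsumed commands. */
    static bool ring_has_work(const struct ring *ring)
    {
            return ring->wptr != ring->rptr;
    }

    /* Return the highest-priority ring with pending work, or NULL if all idle. */
    static struct ring *pick_next_ring(struct ring rings[], int nr_rings)
    {
            for (int i = 0; i < nr_rings; i++)
                    if (ring_has_work(&rings[i]))
                            return &rings[i];
            return NULL;
    }

    int main(void)
    {
            struct ring rings[NR_RINGS] = {
                    { .id = 0, .wptr = 0, .rptr = 0 },  /* highest priority, idle */
                    { .id = 1, .wptr = 8, .rptr = 8 },  /* idle */
                    { .id = 2, .wptr = 5, .rptr = 2 },  /* has pending commands */
                    { .id = 3, .wptr = 9, .rptr = 1 },  /* lower priority, also pending */
            };
            struct ring *active = &rings[3];
            struct ring *next = pick_next_ring(rings, NR_RINGS);

            if (next && next != active)
                    printf("preempt: switch from ring %d to ring %d\n",
                           active->id, next->id);
            return 0;
    }

In this model, ring 2 wins over ring 3 simply because it comes first in the priority-ordered array; the real driver additionally has to hand the switch off to the hardware and cope with submissions racing against it, which is what the fields added below support.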
Diffstat (limited to 'drivers/gpu/drm/msm/msm_ringbuffer.h')
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h | 7
1 file changed, 7 insertions, 0 deletions
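The first hunk below adds a context_idr word alongside ttbr0 in the shared memptrs. The second hunk adds restore_wptr, which records that a write-pointer update was skipped because the ring was not current, so it can be replayed once preemption switches to that ring. Here is a small standalone model of that idea, using hypothetical names (model_ring, submit_to_ring, ring_became_current, write_wptr_reg) rather than the actual msm driver functions:

    /*
     * Standalone model of the restore_wptr idea (illustrative only): a submit
     * to a ring that is not currently active must not touch the hardware WPTR
     * register, so the update is recorded and replayed later.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct model_ring {
            int id;
            unsigned int wptr;      /* software copy of the write pointer */
            bool restore_wptr;      /* deferred WPTR register update pending */
    };

    /* Stand-in for writing the hardware WPTR register of the active ring. */
    static void write_wptr_reg(unsigned int wptr)
    {
            printf("WPTR register <- %u\n", wptr);
    }

    /* Called after new commands were placed in 'ring'. */
    static void submit_to_ring(struct model_ring *ring, const struct model_ring *active)
    {
            if (ring == active)
                    write_wptr_reg(ring->wptr);     /* ring is live: kick it now */
            else
                    ring->restore_wptr = true;      /* defer until it becomes current */
    }

    /* Called once a preemption switch completes and 'ring' is now current. */
    static void ring_became_current(struct model_ring *ring)
    {
            if (ring->restore_wptr) {
                    write_wptr_reg(ring->wptr);     /* replay the skipped update */
                    ring->restore_wptr = false;
            }
    }

    int main(void)
    {
            struct model_ring high = { .id = 0, .wptr = 16 };
            struct model_ring low  = { .id = 1, .wptr = 64 };

            submit_to_ring(&low, &high);    /* low is not current: only sets restore_wptr */
            ring_became_current(&low);      /* switch done: deferred WPTR write happens */
            return 0;
    }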
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 174f83137a49..d1e49f701c81 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -36,6 +36,7 @@ struct msm_rbmemptrs {
 	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
 	volatile u64 ttbr0;
+	volatile u32 context_idr;
 };
 
 struct msm_cp_state {
@@ -101,6 +102,12 @@ struct msm_ringbuffer {
 	 */
 	spinlock_t preempt_lock;
 
+	/*
+	 * Whether we skipped writing wptr and it needs to be updated in the
+	 * future when the ring becomes current.
+	 */
+	bool restore_wptr;
+
 	/**
 	 * cur_ctx_seqno:
 	 *