From 4cd82aa39bda20a9237b8f42676796d0a5ee9bfc Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Fri, 1 Oct 2021 08:58:15 -0700
Subject: drm/msm: A bit more docs + cleanup

msm_file_private is more GPU related, and in the next commit it will
need access to other GPU-specific #defines. While we're at it, add
some comments.

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/msm_gpu.h | 58 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 57 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/msm/msm_gpu.h')

diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 0e4b45bff2e6..42f85c523cfe 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -257,6 +257,26 @@ struct msm_gpu_perfcntr {
  */
 #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
 
+/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock:    synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid:      counter incremented each time a submitqueue is created,
+ *                used to assign &msm_gpu_submitqueue.id
+ * @aspace:       the per-process GPU address-space
+ * @ref:          reference count
+ * @seqno:        unique per process seqno
+ */
+struct msm_file_private {
+	rwlock_t queuelock;
+	struct list_head submitqueues;
+	int queueid;
+	struct msm_gem_address_space *aspace;
+	struct kref ref;
+	int seqno;
+};
+
 /**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
@@ -304,6 +324,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
 }
 
 /**
+ * struct msm_gpu_submitqueue - Userspace created context.
+ *
  * A submitqueue is associated with a gl context or vk queue (or equiv)
  * in userspace.
  *
@@ -321,7 +343,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  *             seqno, protected by submitqueue lock
  * @lock:      submitqueue lock
  * @ref:       reference count
- * @entity: the submit job-queue
+ * @entity:    the submit job-queue
  */
 struct msm_gpu_submitqueue {
 	int id;
@@ -421,6 +443,40 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+		u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+		struct msm_file_private *ctx,
+		u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+		struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+static inline void __msm_file_private_destroy(struct kref *kref)
+{
+	struct msm_file_private *ctx = container_of(kref,
+		struct msm_file_private, ref);
+
+	msm_gem_address_space_put(ctx->aspace);
+	kfree(ctx);
+}
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+	kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+	struct msm_file_private *ctx)
+{
+	kref_get(&ctx->ref);
+	return ctx;
+}
+
 void msm_devfreq_init(struct msm_gpu *gpu);
 void msm_devfreq_cleanup(struct msm_gpu *gpu);
 void msm_devfreq_resume(struct msm_gpu *gpu);
--
cgit
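The get/put helpers added above exist so that longer-lived objects can
pin the per-drm_file context beyond the scope of a single ioctl, with
__msm_file_private_destroy() releasing the address space on the final
put. A minimal sketch of that kref pattern follows; the example_* names
are illustrative only and are not part of the driver:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "msm_gpu.h"

/* Hypothetical object that needs the per-file context to stay alive: */
struct example_holder {
	struct msm_file_private *ctx;
	struct kref ref;
};

static struct example_holder *example_holder_create(struct msm_file_private *ctx)
{
	struct example_holder *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;

	kref_init(&h->ref);

	/* Pin the context; released in example_holder_release(): */
	h->ctx = msm_file_private_get(ctx);

	return h;
}

static void example_holder_release(struct kref *kref)
{
	struct example_holder *h = container_of(kref, struct example_holder, ref);

	/* Drop ctx->ref; __msm_file_private_destroy() runs on the last put: */
	msm_file_private_put(h->ctx);
	kfree(h);
}

This is the shape a submitqueue would follow: take a reference on the
creating context so ctx->aspace stays valid while the queue is alive,
and drop it from the queue's own release function.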
From 68002469e571ae3db095e4ade1cfef64903f8fa1 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Fri, 1 Oct 2021 09:42:05 -0700
Subject: drm/msm: One sched entity per process per priority

Some userspace apps make assumptions that rendering against multiple
contexts within the same process (from the same thread, with
appropriate MakeCurrent() calls) provides sufficient synchronization
without any external synchronization (ie. glFenceSync()/glWaitSync()).
Since a submitqueue maps to a gl/vk context, having multiple sched
entities of the same priority only works with implicit sync enabled.

To fix this, limit things to a single sched entity per priority level
per process.  An alternative would be sharing submitqueues between
contexts in userspace, but tracking of per-context faults (ie.
GL_EXT_robustness) is already done at the submitqueue level, so this
is not an option.

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/msm_gpu.h | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

(limited to 'drivers/gpu/drm/msm/msm_gpu.h')

diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 42f85c523cfe..030f82f149c2 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -275,6 +275,19 @@ struct msm_file_private {
 	struct msm_gem_address_space *aspace;
 	struct kref ref;
 	int seqno;
+
+	/**
+	 * entities:
+	 *
+	 * Table of per-priority-level sched entities used by submitqueues
+	 * associated with this &drm_file.  Because some userspace apps
+	 * make assumptions about rendering from multiple gl contexts
+	 * (of the same priority) within the process happening in FIFO
+	 * order without requiring any fencing beyond MakeCurrent(), we
+	 * create at most one &drm_sched_entity per-process per-priority-
+	 * level.
+	 */
+	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
 };
 
 /**
@@ -355,7 +368,7 @@ struct msm_gpu_submitqueue {
 	struct idr fence_idr;
 	struct mutex lock;
 	struct kref ref;
-	struct drm_sched_entity entity;
+	struct drm_sched_entity *entity;
 };
 
 struct msm_gpu_state_bo {
@@ -456,14 +469,7 @@ void msm_submitqueue_close(struct msm_file_private *ctx);
 
 void msm_submitqueue_destroy(struct kref *kref);
 
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
-	struct msm_file_private *ctx = container_of(kref,
-		struct msm_file_private, ref);
-
-	msm_gem_address_space_put(ctx->aspace);
-	kfree(ctx);
-}
+void __msm_file_private_destroy(struct kref *kref);
 
 static inline void msm_file_private_put(struct msm_file_private *ctx)
 {
--
cgit
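Because this cgit view is limited to msm_gpu.h, the msm_submitqueue.c
side that fills the new entities[] table is not shown. Given the array
layout above (one slot per ring per sched priority, for a total of
NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS entries), the lookup plausibly
looks like the sketch below; the function name, locking choice, and
error handling here are assumptions, not taken from this diff:

#include <linux/err.h>
#include <linux/slab.h>

#include "msm_gpu.h"
#include "msm_ringbuffer.h"

/* Sketch: lazily create (or reuse) the one entity for (ring_nr, sched_prio). */
static struct drm_sched_entity *get_sched_entity(struct msm_file_private *ctx,
		struct msm_ringbuffer *ring, unsigned ring_nr,
		enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* The caller is expected to have validated the priority already: */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_gpu_scheduler *sched = &ring->sched;
		struct drm_sched_entity *entity;
		int ret;

		entity = kzalloc(sizeof(*entity), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

Submitqueue creation would then store the returned pointer in the
queue's ->entity, which is why this patch turns the entity member of
struct msm_gpu_submitqueue from an embedded struct into a pointer:
multiple queues of the same priority in one process now share a single
scheduler entity, so no queue can own it outright.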