Diffstat (limited to 'drivers/gpu/drm/xe/xe_sync.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_sync.c | 133 |
1 file changed, 87 insertions, 46 deletions
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index e4c220cf9115..f87276df18f2 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -12,14 +12,14 @@
 
 #include <drm/drm_print.h>
 #include <drm/drm_syncobj.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
 
 #include "xe_device_types.h"
 #include "xe_exec_queue.h"
 #include "xe_macros.h"
 #include "xe_sched_job_types.h"
 
-struct user_fence {
+struct xe_user_fence {
 	struct xe_device *xe;
 	struct kref refcount;
 	struct dma_fence_cb cb;
@@ -27,39 +27,45 @@ struct user_fence {
 	struct mm_struct *mm;
 	u64 __user *addr;
 	u64 value;
+	int signalled;
 };
 
 static void user_fence_destroy(struct kref *kref)
 {
-	struct user_fence *ufence = container_of(kref, struct user_fence,
+	struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
 						 refcount);
 
 	mmdrop(ufence->mm);
 	kfree(ufence);
 }
 
-static void user_fence_get(struct user_fence *ufence)
+static void user_fence_get(struct xe_user_fence *ufence)
 {
 	kref_get(&ufence->refcount);
 }
 
-static void user_fence_put(struct user_fence *ufence)
+static void user_fence_put(struct xe_user_fence *ufence)
 {
 	kref_put(&ufence->refcount, user_fence_destroy);
 }
 
-static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
-					    u64 value)
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+					       u64 value)
 {
-	struct user_fence *ufence;
+	struct xe_user_fence *ufence;
+	u64 __user *ptr = u64_to_user_ptr(addr);
+	u64 __maybe_unused prefetch_val;
 
-	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
+	if (get_user(prefetch_val, ptr))
+		return ERR_PTR(-EFAULT);
+
+	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
 	if (!ufence)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ufence->xe = xe;
 	kref_init(&ufence->refcount);
-	ufence->addr = u64_to_user_ptr(addr);
+	ufence->addr = ptr;
 	ufence->value = value;
 	ufence->mm = current->mm;
 	mmgrab(ufence->mm);
@@ -69,7 +75,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 
 static void user_fence_worker(struct work_struct *w)
 {
-	struct user_fence *ufence = container_of(w, struct user_fence, worker);
+	struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
 
 	if (mmget_not_zero(ufence->mm)) {
 		kthread_use_mm(ufence->mm);
@@ -77,13 +83,20 @@ static void user_fence_worker(struct work_struct *w)
 			XE_WARN_ON("Copy to user failed");
 		kthread_unuse_mm(ufence->mm);
 		mmput(ufence->mm);
+	} else {
+		drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n");
 	}
 
+	/*
+	 * Wake up waiters only after updating the ufence state, allowing the UMD
+	 * to safely reuse the same ufence without encountering -EBUSY errors.
+	 */
+	WRITE_ONCE(ufence->signalled, 1);
 	wake_up_all(&ufence->xe->ufence_wq);
 	user_fence_put(ufence);
 }
 
-static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
 {
 	INIT_WORK(&ufence->worker, user_fence_worker);
 	queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +105,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
 
 static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-	struct user_fence *ufence = container_of(cb, struct user_fence, cb);
+	struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
 
 	kick_ufence(ufence, fence);
 }
@@ -181,8 +194,8 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
 		} else {
 			sync->ufence = user_fence_create(xe, sync_in.addr,
 							 sync_in.timeline_value);
-			if (XE_IOCTL_DBG(xe, !sync->ufence))
-				return -ENOMEM;
+			if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
+				return PTR_ERR(sync->ufence);
 		}
 
 		break;
@@ -197,33 +210,18 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
 
 	return 0;
 }
-
-int xe_sync_entry_wait(struct xe_sync_entry *sync)
-{
-	if (sync->fence)
-		dma_fence_wait(sync->fence, true);
-
-	return 0;
-}
+ALLOW_ERROR_INJECTION(xe_sync_entry_parse, ERRNO);
 
 int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
 {
-	int err;
-
-	if (sync->fence) {
-		err = drm_sched_job_add_dependency(&job->drm,
-						   dma_fence_get(sync->fence));
-		if (err) {
-			dma_fence_put(sync->fence);
-			return err;
-		}
-	}
+	if (sync->fence)
+		return drm_sched_job_add_dependency(&job->drm,
+						    dma_fence_get(sync->fence));
 
 	return 0;
 }
 
-void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
-			  struct dma_fence *fence)
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
 {
 	if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
 		return;
@@ -252,10 +250,6 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
 			user_fence_put(sync->ufence);
 			dma_fence_put(fence);
 		}
-	} else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
-		job->user_fence.used = true;
-		job->user_fence.addr = sync->addr;
-		job->user_fence.value = sync->timeline_value;
 	}
 }
 
@@ -263,10 +257,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
 {
 	if (sync->syncobj)
 		drm_syncobj_put(sync->syncobj);
-	if (sync->fence)
-		dma_fence_put(sync->fence);
-	if (sync->chain_fence)
-		dma_fence_put(&sync->chain_fence->base);
+	dma_fence_put(sync->fence);
+	dma_fence_chain_free(sync->chain_fence);
 	if (sync->ufence)
 		user_fence_put(sync->ufence);
 }
@@ -307,7 +299,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
 	/* Easy case... */
 	if (!num_in_fence) {
 		fence = xe_exec_queue_last_fence_get(q, vm);
-		dma_fence_get(fence);
 		return fence;
 	}
 
@@ -322,7 +313,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
 		}
 	}
 	fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
-	dma_fence_get(fences[current_fence - 1]);
 	cf = dma_fence_array_create(num_in_fence, fences,
 				    vm->composite_fence_ctx,
 				    vm->composite_fence_seqno++,
@@ -342,3 +332,54 @@ err_out:
 
 	return ERR_PTR(-ENOMEM);
 }
+
+/**
+ * __xe_sync_ufence_get() - Get user fence from user fence
+ * @ufence: input user fence
+ *
+ * Get a user fence reference from user fence
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence)
+{
+	user_fence_get(ufence);
+
+	return ufence;
+}
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+	user_fence_get(sync->ufence);
+
+	return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+	user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+	return READ_ONCE(ufence->signalled);
+}
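The helpers added at the end of the diff (__xe_sync_ufence_get(), xe_sync_ufence_get(), xe_sync_ufence_put(), xe_sync_ufence_get_status()) give other parts of the driver a reference-counted handle to a user fence, and user_fence_worker() publishes the signalled flag with WRITE_ONCE() before waking ufence_wq, so a READ_ONCE() reader sees a stable status. A minimal caller sketch follows; example_track_ufence() is hypothetical and it assumes these helpers are declared in xe_sync.h:

/*
 * Hypothetical illustration only: example_track_ufence() is not part of the
 * driver. It assumes the helpers added by this diff (xe_sync_ufence_get(),
 * xe_sync_ufence_get_status(), xe_sync_ufence_put()) are declared in xe_sync.h.
 */
#include "xe_sync.h"

static int example_track_ufence(struct xe_sync_entry *sync)
{
	struct xe_user_fence *ufence;
	int status;

	/* Only valid for a user-fence sync entry, i.e. sync->ufence != NULL. */
	ufence = xe_sync_ufence_get(sync);

	/*
	 * ... hand ufence to a longer-lived operation ...
	 *
	 * Later, check whether user_fence_worker() has already written the
	 * user value; the READ_ONCE() in the helper pairs with its
	 * WRITE_ONCE().
	 */
	status = xe_sync_ufence_get_status(ufence);

	/* Drop the reference taken above. */
	xe_sync_ufence_put(ufence);

	return status;	/* 1 if signalled, 0 if not */
}

Per the comment in user_fence_worker(), updating the state before the wake-up is what lets the UMD reuse the same ufence address without hitting -EBUSY.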