author    | Matthew Brost <matthew.brost@intel.com> | 2024-01-09 17:24:37 -0800
committer | Matthew Brost <matthew.brost@intel.com> | 2024-01-10 15:11:22 -0800
commit    | 1fd77ceaf0d843af2b7fde83e447b0738d0404cb (patch)
tree      | c269fd05f9c3ba4c24572801bc8272beb9944e2a /drivers/gpu/drm/xe/xe_gt_pagefault.c
parent    | 86f41f4333e31b62d143c5e38c0c58c85193c4c8 (diff)
drm/xe: Invert page fault queue head / tail
The convention for queues in Linux is that the producer moves the head and
the consumer moves the tail. Fix the page fault queue to conform to this
convention. (A minimal sketch of the convention follows the tags below.)
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
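For reference, this is the same convention assumed by the circular-buffer helpers in include/linux/circ_buf.h: CIRC_CNT()/CIRC_SPACE() take (head, tail, size), the producer advances head, the consumer advances tail, and the size is a power of two. Below is a minimal user-space sketch of that convention, not driver code; the queue and message sizes are illustrative and the produce()/consume() helpers are hypothetical stand-ins for the producer (interrupt handler) and consumer (worker) roles.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same definitions as include/linux/circ_buf.h; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define QUEUE_NUM_DW 16	/* illustrative queue size (dwords), power of two */
#define MSG_LEN_DW   4	/* illustrative message length (dwords) */

struct queue {
	uint32_t data[QUEUE_NUM_DW];
	int head;	/* producer writes here, then advances it */
	int tail;	/* consumer reads here, then advances it */
};

/* Producer side: copy a message in at head, then move head forward. */
static int produce(struct queue *q, const uint32_t *msg)
{
	if (CIRC_SPACE(q->head, q->tail, QUEUE_NUM_DW) < MSG_LEN_DW)
		return -1;	/* full */
	memcpy(q->data + q->head, msg, MSG_LEN_DW * sizeof(uint32_t));
	q->head = (q->head + MSG_LEN_DW) % QUEUE_NUM_DW;
	return 0;
}

/* Consumer side: read a message out at tail, then move tail forward. */
static int consume(struct queue *q, uint32_t *msg)
{
	if (CIRC_CNT(q->head, q->tail, QUEUE_NUM_DW) < MSG_LEN_DW)
		return -1;	/* empty */
	memcpy(msg, q->data + q->tail, MSG_LEN_DW * sizeof(uint32_t));
	q->tail = (q->tail + MSG_LEN_DW) % QUEUE_NUM_DW;
	return 0;
}

int main(void)
{
	struct queue q = { .head = 0, .tail = 0 };
	uint32_t in[MSG_LEN_DW] = { 1, 2, 3, 4 }, out[MSG_LEN_DW];

	produce(&q, in);
	if (!consume(&q, out))
		printf("dw0 = %u, head = %d, tail = %d\n",
		       (unsigned int)out[0], q.head, q.tail);
	return 0;
}

Under this convention, an empty queue is tail == head, CIRC_CNT(head, tail, size) is the amount queued, and CIRC_SPACE(head, tail, size) is the room left to the producer (always keeping one slot free).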
Diffstat (limited to 'drivers/gpu/drm/xe/xe_gt_pagefault.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt_pagefault.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 0a61e4413679..3ca715e2ec19 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -282,9 +282,9 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
 	bool ret = false;
 
 	spin_lock_irq(&pf_queue->lock);
-	if (pf_queue->head != pf_queue->tail) {
+	if (pf_queue->tail != pf_queue->head) {
 		desc = (const struct xe_guc_pagefault_desc *)
-			(pf_queue->data + pf_queue->head);
+			(pf_queue->data + pf_queue->tail);
 
 		pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
 		pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
@@ -302,7 +302,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
 		pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
 			PFD_VIRTUAL_ADDR_LO_SHIFT;
 
-		pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) %
+		pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
 			PF_QUEUE_NUM_DW;
 		ret = true;
 	}
@@ -315,7 +315,7 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
 {
 	lockdep_assert_held(&pf_queue->lock);
 
-	return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <=
+	return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
 		PF_MSG_LEN_DW;
 }
 
@@ -342,8 +342,8 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
 	spin_lock_irqsave(&pf_queue->lock, flags);
 	full = pf_queue_full(pf_queue);
 	if (!full) {
-		memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32));
-		pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
+		memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
+		pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
 		queue_work(gt->usm.pf_wq, &pf_queue->worker);
 	} else {
 		drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
@@ -389,7 +389,7 @@ static void pf_queue_work_func(struct work_struct *w)
 		send_pagefault_reply(&gt->uc.guc, &reply);
 
 		if (time_after(jiffies, threshold) &&
-		    pf_queue->head != pf_queue->tail) {
+		    pf_queue->tail != pf_queue->head) {
 			queue_work(gt->usm.pf_wq, w);
 			break;
 		}