author		Justin Tee <justin.tee@broadcom.com>	2023-04-17 12:15:57 -0700
committer	Martin K. Petersen <martin.petersen@oracle.com>	2023-05-08 07:16:05 -0400
commit		a7b94c159210cdb9393fa6e69f8d085e43c0607b (patch)
tree		b8e7021723c39e63b0981036f36980c6e7fe9b07 /drivers/scsi/lpfc/lpfc_init.c
parent		5fc849d8056d6f51bc8cd43cbcd85d4e71aa1ee2 (diff)
scsi: lpfc: Replace blk_irq_poll intr handler with threaded IRQ
It has been determined that the threaded IRQ API achieves effectively the
same performance as blk_irq_poll. Because blk_irq_poll is mostly scheduled
by ksoftirqd and handled in softirq context, it is not entirely desirable
in a Fibre Channel driver context. A threaded IRQ model is a cleaner fit;
the generic pattern is sketched after the sign-offs below. This patch
replaces the blk_irq_poll logic with threaded IRQs.
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20230417191558.83100-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
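
For readers unfamiliar with the API, the generic threaded IRQ pattern adopted
here looks roughly like the following minimal sketch. It is an illustration
only, not lpfc code: the names my_dev, my_hard_handler, my_thread_fn, and the
two stub helpers are hypothetical.

#include <linux/interrupt.h>
#include <linux/io.h>

struct my_dev {
	void __iomem *regs;
	int irq;
};

/* Hypothetical stand-ins for real device logic. */
static bool my_irq_pending(struct my_dev *dev) { return true; }
static void my_process_events(struct my_dev *dev) { }

/* Hard handler: runs in hard-IRQ context. Do only the cheap
 * check, then hand the real work off to the thread. */
static irqreturn_t my_hard_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	if (!my_irq_pending(dev))
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;	/* schedule my_thread_fn() */
}

/* Thread handler: runs in a dedicated kernel thread (process
 * context), so it may sleep. With IRQF_ONESHOT the interrupt
 * line stays masked until this function returns. */
static irqreturn_t my_thread_fn(int irq, void *data)
{
	struct my_dev *dev = data;

	my_process_events(dev);
	return IRQ_HANDLED;
}

static int my_setup_irq(struct my_dev *dev)
{
	return request_threaded_irq(dev->irq, my_hard_handler,
				    my_thread_fn, IRQF_ONESHOT,
				    "my-dev", dev);
}

In the diff below, lpfc_sli4_hba_intr_handler plays the hard-handler role and
the new lpfc_sli4_hba_intr_handler_th is the thread function.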
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	26
1 file changed, 15 insertions, 11 deletions
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 867b4c788f08..088bd75fb5d7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1279,7 +1279,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 /*
  * lpfc_idle_stat_delay_work - idle_stat tracking
  *
- * This routine tracks per-cq idle_stat and determines polling decisions.
+ * This routine tracks per-eq idle_stat and determines polling decisions.
  *
  * Return codes:
  *   None
@@ -1290,7 +1290,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
 	struct lpfc_hba *phba = container_of(to_delayed_work(work),
 					     struct lpfc_hba,
 					     idle_stat_delay_work);
-	struct lpfc_queue *cq;
+	struct lpfc_queue *eq;
 	struct lpfc_sli4_hdw_queue *hdwq;
 	struct lpfc_idle_stat *idle_stat;
 	u32 i, idle_percent;
@@ -1306,10 +1306,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
 
 	for_each_present_cpu(i) {
 		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
-		cq = hdwq->io_cq;
+		eq = hdwq->hba_eq;
 
-		/* Skip if we've already handled this cq's primary CPU */
-		if (cq->chann != i)
+		/* Skip if we've already handled this eq's primary CPU */
+		if (eq->chann != i)
 			continue;
 
 		idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -1333,9 +1333,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
 		idle_percent = 100 - idle_percent;
 
 		if (idle_percent < 15)
-			cq->poll_mode = LPFC_QUEUE_WORK;
+			eq->poll_mode = LPFC_QUEUE_WORK;
 		else
-			cq->poll_mode = LPFC_IRQ_POLL;
+			eq->poll_mode = LPFC_THREADED_IRQ;
 
 		idle_stat->prev_idle = wall_idle;
 		idle_stat->prev_wall = wall;
@@ -4357,6 +4357,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
 	struct lpfc_sli4_hdw_queue *qp;
 	struct lpfc_io_buf *lpfc_cmd;
 	int idx, cnt;
+	unsigned long iflags;
 
 	qp = phba->sli4_hba.hdwq;
 	cnt = 0;
@@ -4371,12 +4372,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
 			lpfc_cmd->hdwq_no = idx;
 			lpfc_cmd->hdwq = qp;
 			lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
-			spin_lock(&qp->io_buf_list_put_lock);
+			spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
 			list_add_tail(&lpfc_cmd->list,
				      &qp->lpfc_io_buf_list_put);
 			qp->put_io_bufs++;
 			qp->total_io_bufs++;
-			spin_unlock(&qp->io_buf_list_put_lock);
+			spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
+					       iflags);
 		}
 	}
 	return cnt;
@@ -13117,8 +13119,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		}
 		eqhdl->irq = rc;
 
-		rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
-				 name, eqhdl);
+		rc = request_threaded_irq(eqhdl->irq,
+					  &lpfc_sli4_hba_intr_handler,
+					  &lpfc_sli4_hba_intr_handler_th,
+					  IRQF_ONESHOT, name, eqhdl);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 					"0486 MSI-X fast-path (%d) "
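
A note on the lpfc_io_buf_replenish hunk: converting spin_lock() to
spin_lock_irqsave() follows the standard kernel rule that a spinlock which
can also be taken from hard-IRQ context must be taken with local interrupts
disabled in process context, or an interrupt arriving on the same CPU while
the lock is held deadlocks. That this lock is now reachable from the hard
half of the new threaded handler is a reading of the change, not something
the commit message states. A minimal sketch of the rule, with hypothetical
names (demo_lock, demo_count):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_count;

/* Hard-IRQ context: interrupts are already disabled on this CPU,
 * so a plain spin_lock() is sufficient. */
static irqreturn_t demo_hard_handler(int irq, void *data)
{
	spin_lock(&demo_lock);
	demo_count++;
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

/* Process context: must save and disable interrupts. If this path
 * held demo_lock via plain spin_lock() and demo_hard_handler() fired
 * on the same CPU, the handler would spin on demo_lock forever. */
static void demo_process_side(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}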