Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 193
1 file changed, 104 insertions(+), 89 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index aecd2399005d..5f5528a12308 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int hq_put_index;
+ int dq_put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
- temp_hrqe = hq->qe[put_index].rqe;
- temp_drqe = dq->qe[dq->host_index].rqe;
+ hq_put_index = hq->host_index;
+ dq_put_index = dq->host_index;
+ temp_hrqe = hq->qe[hq_put_index].rqe;
+ temp_drqe = dq->qe[dq_put_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
- if (put_index != dq->host_index)
+ if (hq_put_index != dq_put_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
- if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+ if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
- hq->host_index = ((put_index + 1) % hq->entry_count);
- dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+ hq->host_index = ((hq_put_index + 1) % hq->entry_count);
+ dq->host_index = ((dq_put_index + 1) % dq->entry_count);
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
}
writel(doorbell.word0, hq->db_regaddr);
}
- return put_index;
+ return hq_put_index;
}
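
The change above is mechanical: the single put_index that served both queues becomes hq_put_index and dq_put_index, captured before either host_index is advanced, so the doorbell ring and the return value still name the slot that was actually written. Reduced to a self-contained sketch of the pattern (simplified types, not the driver's API):

struct ring {
	unsigned int host_index;	/* next slot the producer fills */
	unsigned int hba_index;		/* consumer-side index */
	unsigned int entry_count;
};

/* Post one entry to a header/data ring pair kept in lockstep. */
static int ring_pair_put(struct ring *hq, struct ring *dq)
{
	unsigned int hq_put = hq->host_index;	/* snapshot before advancing */
	unsigned int dq_put = dq->host_index;

	if (hq_put != dq_put)			/* rings fell out of lockstep */
		return -1;
	if (((hq_put + 1) % hq->entry_count) == hq->hba_index)
		return -1;			/* next slot not consumed yet */

	/* copy the new entries into slot hq_put / dq_put here */

	hq->host_index = (hq_put + 1) % hq->entry_count;
	dq->host_index = (dq_put + 1) % dq->entry_count;
	return hq_put;		/* caller rings the doorbell for this slot */
}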
/**
@@ -12318,41 +12320,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 NVME abort XRI events.
- **/
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
-{
- struct lpfc_cq_event *cq_event;
-
- /* First, declare the fcp xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
- /* Now, handle all the fcp xri abort events */
- while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
- list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support) {
- lpfc_sli4_nvmet_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- } else {
- lpfc_sli4_nvme_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- }
- /* Free the event processed back to the free pool */
- lpfc_sli4_cq_event_release(phba, cq_event);
- }
-}
-
-/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return irspiocbq;
}
+inline struct lpfc_cq_event *
+lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to alloc CQ_EVENT entry\n");
+ return NULL;
+ }
+
+ /* Move the CQE into the event */
+ memcpy(&cq_event->cqe, entry, size);
+ return cq_event;
+}
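
lpfc_cq_event_setup() factors out the allocate-and-copy sequence that the slow-path handlers below used to open-code, so each caller collapses to one call plus a NULL check, roughly (abbreviated from the following hunks):

	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_cq_event_setup(phba, wcqe,
				       sizeof(struct sli4_wcqe_xri_aborted));
	if (!cq_event)
		return false;	/* nothing was posted for the worker thread */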
+
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
@@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
"word2:x%x, word3:x%x\n", mcqe->word0,
mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0394 Failed to allocate CQ_EVENT entry\n");
+ cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
+ if (!cq_event)
return false;
- }
-
- /* Move the CQE into an asynchronous event entry */
- memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
/* Set the async event flag */
@@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
struct lpfc_cq_event *cq_event;
unsigned long iflags;
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0602 Failed to allocate CQ_EVENT entry\n");
- return false;
- }
-
- /* Move the CQE into the proper xri abort event list */
- memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
switch (cq->subtype) {
case LPFC_FCP:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
workposted = true;
break;
case LPFC_NVME:
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&cq_event->list,
- &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
- /* Set the nvme xri abort event flag */
- phba->hba_flag |= NVME_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ else
+ lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+
+ workposted = false;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
"%08x %08x %08x %08x\n",
cq->subtype, wcqe->word0, wcqe->parameter,
wcqe->word2, wcqe->word3);
- lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
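
The reordering here (and in the NVMET receive hunk that follows) fixes the locking, not the logic: lpfc_sli4_rq_release() hands the consumed slots back by advancing the hardware-side indices of both receive queues, state that lpfc_sli4_rq_put() also examines under phba->hbalock, so the release now happens inside the existing critical section rather than just before it. What that release amounts to, as an illustrative sketch (simplified types; the real routine operates on struct lpfc_queue, and the caller now holds the lock):

struct rq_idx {
	unsigned int hba_index;		/* consumer-side index */
	unsigned int entry_count;
};

/* Advance both rings past the slots that were just consumed; in the fixed
 * code the caller already holds phba->hbalock around this.
 */
static void rq_pair_release(struct rq_idx *hq, struct rq_idx *dq)
{
	hq->hba_index = (hq->hba_index + 1) % hq->entry_count;
	dq->hba_index = (dq->hba_index + 1) % dq->entry_count;
}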
@@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
/**
* lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
* @phba: The HBA that this queue is being created on.
+ * @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
* @entry_count: The number of entries that this queue will handle.
*
@@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* queue on the HBA.
**/
struct lpfc_queue *
-lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
- uint32_t entry_count)
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+ uint32_t entry_size, uint32_t entry_count)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
@@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = page_size;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
+
+ /* Set queue parameters now. If the system cannot provide memory
+ * resources, the free routine needs to know what was allocated.
+ */
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+ queue->page_size = hw_page_size;
+ queue->phba = phba;
+
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
@@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->qe[total_qe_count].address = dma_pointer;
}
}
- queue->entry_size = entry_size;
- queue->entry_count = entry_count;
- queue->phba = phba;
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
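
Creation sites now pass the queue page size explicitly, and because entry_size, entry_count, page_size and phba are recorded before the page-allocation loop, a partially built queue can be handed straight to lpfc_sli4_queue_free(), which releases each page using the recorded queue->page_size. A hypothetical call site (the entry size and count are illustrative values, not the driver's actual configuration):

	struct lpfc_queue *wq;

	/* Illustrative only: 4KB pages, 64-byte entries, 1024 of them */
	wq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 64, 1024);
	if (!wq)
		return -ENOMEM;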
@@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!cq || !eq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.cqv);
if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
- /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
- bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (cq->page_size / SLI4_PAGE_SIZE));
bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
eq->queue_id);
} else {
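
The version-2 CQ_CREATE page_size field is expressed in multiples of SLI4_PAGE_SIZE (4KB), so the old hard-coded 1 pinned the queue to 4KB pages; dividing cq->page_size by SLI4_PAGE_SIZE lets a larger page be requested. A quick worked example with hypothetical values:

	uint32_t page_size = 4 * SLI4_PAGE_SIZE;	/* a 16KB queue page */
	uint32_t encoded = page_size / SLI4_PAGE_SIZE;	/* 16384 / 4096 = 4 */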
@@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
eq->queue_id);
}
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ cq_create->u.request.context.lpfc_cq_context_count =
+ cq->entry_count;
+ bf_set(lpfc_cq_context_count,
+ &cq_create->u.request.context,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
- memset(dmabuf->virt, 0, hw_page_size);
+ memset(dmabuf->virt, 0, cq->page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
numcq = phba->cfg_nvmet_mrq;
if (!cqp || !eqp || !numcq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -ENOMEM;
goto out;
}
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = cq->page_size;
switch (idx) {
case 0:
@@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
bf_set(lpfc_mbx_cq_create_set_num_cq,
&cq_set->u.request, numcq);
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ cq->entry_count);
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
+ cq->chann = idx;
rc = 0;
list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
void __iomem *bar_memmap_p;
uint32_t db_offset;
uint16_t pci_barset;
+ uint8_t wq_create_version;
/* sanity check on queue memory */
if (!wq || !cq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = wq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
+ wq_create_version = LPFC_Q_CREATE_VERSION_1;
+ else
+ wq_create_version = LPFC_Q_CREATE_VERSION_0;
+
+ switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_0:
switch (wq->entry_size) {
default:
@@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
bf_set(lpfc_mbx_wq_create_page_size,
&wq_create->u.request_1,
- LPFC_WQ_PAGE_SIZE_4096);
+ (wq->page_size / SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
break;
default: