Diffstat (limited to 'drivers/scsi/bnx2fc/bnx2fc_hwi.c')
 -rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 298
 1 file changed, 158 insertions(+), 140 deletions(-)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index c0d035a8f8f9..090d436bcef8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1,8 +1,10 @@
-/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
+/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
* This file contains the low level functions that interact
* with the 57712 FCoE firmware.
*
- * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -483,7 +485,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
/**
* bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
*
- * @port: port structure pointer
+ * @hba: adapter structure pointer
* @tgt: bnx2fc_rport structure pointer
*/
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
@@ -631,10 +633,8 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
u16 xid;
u32 frame_len, len;
struct bnx2fc_cmd *io_req = NULL;
- struct fcoe_task_ctx_entry *task, *task_page;
struct bnx2fc_interface *interface = tgt->port->priv;
struct bnx2fc_hba *hba = interface->hba;
- int task_idx, index;
int rc = 0;
u64 err_warn_bit_map;
u8 err_warn = 0xff;
@@ -700,18 +700,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
-
if (xid > hba->max_xid) {
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
xid);
goto ret_err_rqe;
}
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)
- hba->task_ctx[task_idx];
- task = &(task_page[index]);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (!io_req)
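
The removed lines are the per-xid task-context lookup that this patch centralizes in the new bnx2fc_pending_work() further down. For reference, the mapping from an xid to its task context entry works like this (hypothetical helper name; logic lifted from the removed lines):

	static struct fcoe_task_ctx_entry *
	xid_to_task(struct bnx2fc_hba *hba, u16 xid)
	{
		int task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		int index = xid % BNX2FC_TASKS_PER_PAGE;
		struct fcoe_task_ctx_entry *task_page =
			(struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];

		return &task_page[index];
	}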
@@ -776,7 +770,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
} else
printk(KERN_ERR PFX "SRR in progress\n");
goto ret_err_rqe;
- break;
default:
break;
}
@@ -828,18 +821,13 @@ ret_err_rqe:
((u64)err_entry->data.err_warn_bitmap_hi << 32) |
(u64)err_entry->data.err_warn_bitmap_lo;
for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
- if (err_warn_bit_map & (u64) (1 << i)) {
+ if (err_warn_bit_map & ((u64)1 << i)) {
err_warn = i;
break;
}
}
BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[task_idx];
- task = &(task_page[index]);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (!io_req)
goto ret_warn_rqe;
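
The bitmap-test change is a real fix, not style: in (u64)(1 << i) the shift is evaluated on a 32-bit signed int before the cast, which is undefined at i == 31 and cannot reach higher bits at all, so an err_warn bit at position 31 or above could be misreported. A standalone illustration:

	u64 old_way = (u64)(1 << 31);	/* UB: int shift overflows; commonly sign-extends to 0xffffffff80000000 */
	u64 new_way = (u64)1 << 31;	/* 0x0000000080000000 as intended, and valid up to bit 63 */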
@@ -868,36 +856,22 @@ ret_warn_rqe:
}
}
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_cmd *io_req;
- int task_idx, index;
+
u16 xid;
u8 cmd_type;
u8 rx_state = 0;
- u8 num_rq;
spin_lock_bh(&tgt->tgt_lock);
- xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
- if (xid >= hba->max_tasks) {
- printk(KERN_ERR PFX "ERROR:xid out of range\n");
- spin_unlock_bh(&tgt->tgt_lock);
- return;
- }
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
- task = &(task_page[index]);
-
- num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (io_req == NULL) {
@@ -917,7 +891,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
- bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
+ rq_data);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
@@ -934,7 +909,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
case BNX2FC_TASK_MGMT_CMD:
BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
- bnx2fc_process_tm_compl(io_req, task, num_rq);
+ bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
break;
case BNX2FC_ABTS:
@@ -989,11 +964,12 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
FCOE_CQE_TOGGLE_BIT_SHIFT);
msg = *((u32 *)rx_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
- mmiowb();
}
-struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
struct bnx2fc_work *work;
work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
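
The mmiowb() drop here (and in bnx2fc_ring_doorbell() further down) follows the kernel-wide removal in v5.2, when releasing the spinlock that serializes an MMIO write began to imply the barrier. Assuming the doorbell is written under the target lock, as in this driver, the resulting pattern is simply:

	spin_lock_bh(&tgt->tgt_lock);
	writel(cpu_to_le32(msg), tgt->ctx_base);	/* posted doorbell write */
	spin_unlock_bh(&tgt->tgt_lock);			/* implies the old mmiowb() */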
@@ -1003,9 +979,88 @@ struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
INIT_LIST_HEAD(&work->list);
work->tgt = tgt;
work->wqe = wqe;
+ work->num_rq = num_rq;
+ work->task = task;
+ if (rq_data)
+ memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
+
return work;
}
+/* Pending work request completion */
+static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+ unsigned int cpu = wqe % num_possible_cpus();
+ struct bnx2fc_percpu_s *fps;
+ struct bnx2fc_work *work;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ unsigned char *rq_data = NULL;
+ unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
+ int task_idx, index;
+ u16 xid;
+ u8 num_rq;
+ int i;
+
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= hba->max_tasks) {
+ pr_err(PFX "ERROR:xid out of range\n");
+ return false;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &task_page[index];
+
+ num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+
+ memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
+
+ if (!num_rq)
+ goto num_rq_zero;
+
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ if (rq_data)
+ memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+
+num_rq_zero:
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (fps->iothread) {
+ work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
+ num_rq, task);
+ if (work) {
+ list_add_tail(&work->list, &fps->work_list);
+ wake_up_process(fps->iothread);
+ spin_unlock_bh(&fps->fp_work_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&fps->fp_work_lock);
+ bnx2fc_process_cq_compl(tgt, wqe,
+ rq_data_buff, num_rq, task);
+
+ return true;
+}
+
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
struct fcoe_cqe *cq;
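
bnx2fc_pending_work() snapshots the RQ payload into the work item before the RQ entries are returned, so the per-CPU iothread never touches the RQ itself. From the fields used above, struct bnx2fc_work must have grown roughly this shape (the real definition lives in bnx2fc.h, outside this diff):

	struct bnx2fc_work {
		struct list_head list;
		struct bnx2fc_rport *tgt;
		struct fcoe_task_ctx_entry *task;
		unsigned char rq_data[BNX2FC_RQ_BUF_SZ];
		u16 wqe;
		u8 num_rq;
	};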
@@ -1040,29 +1095,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
- /* Pending work request completion */
- struct bnx2fc_work *work = NULL;
- struct bnx2fc_percpu_s *fps = NULL;
- unsigned int cpu = wqe % num_possible_cpus();
-
- fps = &per_cpu(bnx2fc_percpu, cpu);
- spin_lock_bh(&fps->fp_work_lock);
- if (unlikely(!fps->iothread))
- goto unlock;
-
- work = bnx2fc_alloc_work(tgt, wqe);
- if (work)
- list_add_tail(&work->list,
- &fps->work_list);
-unlock:
- spin_unlock_bh(&fps->fp_work_lock);
-
- /* Pending work request completion */
- if (fps->iothread && work)
- wake_up_process(fps->iothread);
- else
- bnx2fc_process_cq_compl(tgt, wqe);
- num_free_sqes++;
+ if (bnx2fc_pending_work(tgt, wqe))
+ num_free_sqes++;
}
cqe++;
tgt->cq_cons_idx++;
@@ -1120,7 +1154,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
struct fcoe_kcqe *ofld_kcqe)
{
struct bnx2fc_rport *tgt;
- struct fcoe_port *port;
struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
@@ -1134,10 +1167,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
}
BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
ofld_kcqe->fcoe_conn_context_id);
- port = tgt->port;
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
goto ofld_cmpl_err;
}
/*
@@ -1194,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ERR PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mismatch\n");
return;
}
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
goto enbl_cmpl_err;
}
if (!ofld_kcqe->completion_status)
@@ -1299,10 +1331,10 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
}
/**
- * bnx2fc_indicae_kcqe - process KCQE
+ * bnx2fc_indicate_kcqe() - process KCQE
*
- * @hba: adapter structure pointer
- * @kcqe: kcqe pointer
+ * @context: adapter structure pointer
+ * @kcq: kcqe pointer
* @num_cqe: Number of completion queue elements
*
* Generic KCQ event handler
@@ -1371,7 +1403,6 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
break;
case FCOE_KCQE_OPCODE_FCOE_ERROR:
- /* fall thru */
default:
printk(KERN_ERR PFX "unknown opcode 0x%x\n",
kcqe->op_code);
@@ -1406,7 +1437,6 @@ void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
(tgt->sq_curr_toggle_bit << 15);
msg = *((u32 *)sq_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
- mmiowb();
}
@@ -1421,9 +1451,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
- (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
- tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
+ tgt->ctx_base = ioremap(reg_base + reg_off, 4);
if (!tgt->ctx_base)
return -ENOMEM;
return 0;
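
Two independent cleanups meet in this hunk: ioremap() is uncached by default, so the _nocache variant was redundant (it was later removed entirely in v5.6), and the doorbell stride now comes from the shared bnx2x define rather than a driver-local page size. Assuming 1 << BNX2X_DB_SHIFT equals the old BNX2FC_5771X_DB_PAGE_SIZE, the new offset matches the old one minus the DPM_TRIGER_TYPE term:

	/* old */ reg_off = BNX2FC_5771X_DB_PAGE_SIZE * (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	/* new */ reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);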
@@ -1463,10 +1492,7 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
{
struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
- struct bnx2fc_interface *interface = tgt->port->priv;
struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
- struct fcoe_task_ctx_entry *orig_task;
- struct fcoe_task_ctx_entry *task_page;
struct fcoe_ext_mul_sges_ctx *sgl;
u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
u8 orig_task_type;
@@ -1475,7 +1501,6 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
u32 orig_offset = offset;
int bd_count;
- int orig_task_idx, index;
int i;
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
@@ -1525,12 +1550,6 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
offset; /* adjusted offset */
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
} else {
- orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
- index = orig_xid % BNX2FC_TASKS_PER_PAGE;
-
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[orig_task_idx];
- orig_task = &(task_page[index]);
/* Multiple SGEs were used for this IO */
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
@@ -1690,7 +1709,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
struct fcoe_cached_sge_ctx *cached_sge;
struct fcoe_ext_mul_sges_ctx *sgl;
int dev_type = tgt->dev_type;
- u64 *fcp_cmnd;
+ struct fcp_cmnd *fcp_cmnd;
+ u64 *raw_fcp_cmnd;
u64 tmp_fcp_cmnd[4];
u32 context_id;
int cnt, i;
@@ -1759,16 +1779,19 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
/* Fill FCP_CMND IU */
- fcp_cmnd = (u64 *)
+ fcp_cmnd = (struct fcp_cmnd *)&tmp_fcp_cmnd;
+ bnx2fc_build_fcp_cmnd(io_req, fcp_cmnd);
+ int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+ raw_fcp_cmnd = (u64 *)
task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
- bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
/* swap fcp_cmnd */
cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
for (i = 0; i < cnt; i++) {
- *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
- fcp_cmnd++;
+ *raw_fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+ raw_fcp_cmnd++;
}
/* Rx Write Tx Read */
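
The staging buffer is now typed as struct fcp_cmnd, so the LUN and CDB are filled through named fields before the big-endian copy-out. The u64 swap loop is valid because the FCP_CMND IU is exactly 32 bytes, i.e. the four u64s of tmp_fcp_cmnd; a defensive check one could add (not part of this patch):

	BUILD_BUG_ON(sizeof(struct fcp_cmnd) != sizeof(tmp_fcp_cmnd));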
@@ -1864,7 +1887,6 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
rc = -1;
goto out;
}
- memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
/*
* Allocate task_ctx which is an array of pointers pointing to
@@ -1902,7 +1924,6 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
rc = -1;
goto out3;
}
- memset(hba->task_ctx[i], 0, PAGE_SIZE);
addr = (u64)hba->task_ctx_dma[i];
task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
task_ctx_bdt->lo = cpu_to_le32((u32)addr);
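
The memset() calls dropped in this and the following hunks are redundant rather than relocated: since v5.0 the DMA core guarantees that dma_alloc_coherent() returns zero-filled memory, so the allocation pattern reduces to:

	ptr = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	/* ptr is already zeroed; no follow-up memset() needed */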
@@ -1967,26 +1988,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
int i;
int segment_count;
- int hash_table_size;
u32 *pbl;
- segment_count = hba->hash_tbl_segment_count;
- hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
- sizeof(struct fcoe_hash_table_entry);
+ if (hba->hash_tbl_segments) {
- pbl = hba->hash_tbl_pbl;
- for (i = 0; i < segment_count; ++i) {
- dma_addr_t dma_address;
+ pbl = hba->hash_tbl_pbl;
+ if (pbl) {
+ segment_count = hba->hash_tbl_segment_count;
+ for (i = 0; i < segment_count; ++i) {
+ dma_addr_t dma_address;
- dma_address = le32_to_cpu(*pbl);
- ++pbl;
- dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
- ++pbl;
- dma_free_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- hba->hash_tbl_segments[i],
- dma_address);
+ dma_address = le32_to_cpu(*pbl);
+ ++pbl;
+ dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+ ++pbl;
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_address);
+ }
+ }
+ kfree(hba->hash_tbl_segments);
+ hba->hash_tbl_segments = NULL;
}
if (hba->hash_tbl_pbl) {
@@ -2024,41 +2048,27 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
if (!dma_segment_array) {
printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
- return -ENOMEM;
+ goto cleanup_ht;
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] =
- dma_alloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
- while (--i >= 0) {
- dma_free_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- hba->hash_tbl_segments[i],
- dma_segment_array[i]);
- hba->hash_tbl_segments[i] = NULL;
- }
- kfree(dma_segment_array);
- return -ENOMEM;
+ goto cleanup_dma;
}
- memset(hba->hash_tbl_segments[i], 0,
- BNX2FC_HASH_TBL_CHUNK_SIZE);
}
- hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
&hba->hash_tbl_pbl_dma,
GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
- kfree(dma_segment_array);
- return -ENOMEM;
+ goto cleanup_dma;
}
- memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
pbl = hba->hash_tbl_pbl;
for (i = 0; i < segment_count; ++i) {
@@ -2071,16 +2081,28 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
pbl = hba->hash_tbl_pbl;
i = 0;
while (*pbl && *(pbl + 1)) {
- u32 lo;
- u32 hi;
- lo = *pbl;
++pbl;
- hi = *pbl;
++pbl;
++i;
}
kfree(dma_segment_array);
return 0;
+
+cleanup_dma:
+ for (i = 0; i < segment_count; ++i) {
+ if (hba->hash_tbl_segments[i])
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_segment_array[i]);
+ }
+
+ kfree(dma_segment_array);
+
+cleanup_ht:
+ kfree(hba->hash_tbl_segments);
+ hba->hash_tbl_segments = NULL;
+ return -ENOMEM;
}
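
bnx2fc_allocate_hash_table() now unwinds through labelled cleanup blocks instead of an inline rollback loop, the standard kernel goto-unwind idiom. In miniature (hypothetical helper, not driver code):

	static int alloc_pair(void **a, void **b)
	{
		*a = kzalloc(SZ_4K, GFP_KERNEL);
		if (!*a)
			goto err;
		*b = kzalloc(SZ_4K, GFP_KERNEL);
		if (!*b)
			goto err_free_a;
		return 0;

	err_free_a:
		kfree(*a);
		*a = NULL;
	err:
		return -ENOMEM;
	}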
/**
@@ -2107,7 +2129,6 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
@@ -2119,7 +2140,6 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl, 0x00, mem_size);
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
addr = (unsigned long) hba->t2_hash_tbl_dma +
((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@@ -2136,8 +2156,7 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
&hba->stats_buf_dma,
GFP_KERNEL);
if (!hba->stats_buffer) {
@@ -2145,7 +2164,6 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->stats_buffer, 0x00, PAGE_SIZE);
return 0;
}